switchx2.c

/*
 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "ib.h"

static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;

struct mlxsw_sx {
	struct mlxsw_sx_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];
};

struct mlxsw_sx_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;
	u8 local_port;
	struct {
		u8 module;
	} mapping;
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
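
/* Prepend the device Tx header to an outgoing skb. Every frame handed to
 * the firmware carries this metadata; only default egress-QoS values are
 * used here, with EMADs getting their dedicated tclass and RDQ.
 */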
static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
					  bool is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
				    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
					 bool *p_is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
				   u16 mtu)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u16 mtu)
{
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    u16 mtu)
{
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u8 ib_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
	int err;

	mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
	return err;
}

static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
}

static int
mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}

static int mlxsw_sx_port_open(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sx_port_stop(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
}
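
/* Data-path transmit: make room for the Tx header, prepend it and hand the
 * skb to the core for transmission. Per-CPU counters are updated on success;
 * on failure the packet is dropped and counted in tx_dropped.
 */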
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}
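
/* Aggregate the per-CPU counters for ndo_get_stats64. The 64-bit counters
 * are read under u64_stats_sync; tx_dropped is a plain u32 summed without it.
 */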
static void
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
}

static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
	if (err >= len)
		return -EINVAL;

	return 0;
}

static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open = mlxsw_sx_port_open,
	.ndo_stop = mlxsw_sx_port_stop,
	.ndo_start_xmit = mlxsw_sx_port_xmit,
	.ndo_change_mtu = mlxsw_sx_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
	.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
};

static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)

static void mlxsw_sx_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SX_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */

static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
			speed = mlxsw_sx_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
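
/* ethtool get_link_ksettings: query the PTYS register and translate the
 * capability, admin and operational protocol masks into legacy ethtool
 * link-mode bitmaps.
 */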
static int
mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	u32 supported, advertising, lp_advertising;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
		    mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
		    SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	return 0;
}

static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sx_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}
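
/* ethtool set_link_ksettings: map the requested advertising mask (autoneg)
 * or forced speed to a PTYS protocol mask, write it, and if the port is
 * operationally up toggle its admin state so the new setting takes effect.
 */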
static int
mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 advertising;
	bool is_up;
	int err;

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sx_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlxsw_sx_port_get_strings,
	.get_ethtool_stats = mlxsw_sx_port_get_stats,
	.get_sset_count = mlxsw_sx_port_get_sset_count,
	.get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};

static int mlxsw_sx_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get = mlxsw_sx_port_attr_get,
};

static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
	return 0;
}

static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}

static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
				      u16 speed, u16 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];

	mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
			       width);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    enum mlxsw_reg_spmlr_learn_mode mode)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
}
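
/* Create an Ethernet port: allocate and set up its netdev, map it to a
 * system port on SWID 0, configure speed, MTU, STP forwarding state and
 * disabled MAC learning, then register it with the core and the stack.
 */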
static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev, false, 0);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}

static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				    u8 module, u8 width)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
	if (err)
		goto err_port_create;

	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
	return err;
}

static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}

static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	return mlxsw_sx->ports[local_port] != NULL;
}
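
/* Create an InfiniBand port: no netdev is involved; the port is moved to
 * the InfiniBand switch partition (SWID 1), given its front-panel IB port
 * number, configured for all speeds up to FDR and widths up to 4x, set to
 * the maximum MTU and brought administratively up.
 */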
static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Add the port to the InfiniBand SWID (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as its front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Support all speeds from SDR to FDR (bitmask) and bus widths of
	 * 1x, 2x and 4x (3-bit bitmask).
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports; the SMA will take
	 * care of the active MTU.
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}

static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	kfree(mlxsw_sx_port);
}

static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	enum devlink_port_type port_type =
		mlxsw_core_port_type_get(mlxsw_sx->core, local_port);

	if (port_type == DEVLINK_PORT_TYPE_ETH)
		__mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		__mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
}

static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
}

static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
}

static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
	size_t alloc_size;
	u8 module, width;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sx->ports)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
						    &width);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
	return err;
}

static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					 enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sx_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sx_port->dev);
	} else {
		netdev_info(mlxsw_sx_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sx_port->dev);
	}
}

static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP)
		pr_info("ib link for port %d - up\n",
			mlxsw_sx_port->mapping.module + 1);
	else
		pr_info("ib link for port %d - down\n",
			mlxsw_sx_port->mapping.module + 1);
}
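
/* PUDE (port up/down event) handler: look up the port by its local port
 * number and propagate the new operational status according to the port's
 * current devlink type (Ethernet or InfiniBand).
 */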
static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port;
	enum mlxsw_reg_pude_oper_status status;
	enum devlink_port_type port_type;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sx_port = mlxsw_sx->ports[local_port];
	if (!mlxsw_sx_port) {
		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
	if (port_type == DEVLINK_PORT_TYPE_ETH)
		mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
}
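
/* Rx trap handler: packets trapped to the CPU are attributed to the
 * originating port's netdev, counted in its per-CPU stats and injected
 * into the network stack.
 */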
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
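
/* devlink port type change: tear down the existing port instance and
 * re-create it as either an Ethernet or an InfiniBand port.
 */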
static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
				  enum devlink_port_type new_type)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	u8 module, width;
	int err;

	if (new_type == DEVLINK_PORT_TYPE_AUTO)
		return -EOPNOTSUPP;

	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
					    &width);
	if (err)
		goto err_port_module_info_get;

	if (new_type == DEVLINK_PORT_TYPE_ETH)
		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
						 width);
	else if (new_type == DEVLINK_PORT_TYPE_IB)
		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
						width);

err_port_module_info_get:
	return err;
}

#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
		  false, SX2_RX, FORWARD)

static const struct mlxsw_listener mlxsw_sx_listener[] = {
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(STP),
	MLXSW_SX_RXL(LACP),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(LLDP),
	MLXSW_SX_RXL(MMRP),
	MLXSW_SX_RXL(MVRP),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(DHCP),
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};
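
/* Configure the SX2 Rx and control trap groups and register all listeners,
 * unwinding the registrations already done if any of them fails.
 */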
  1297. static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
  1298. {
  1299. char htgt_pl[MLXSW_REG_HTGT_LEN];
  1300. int i;
  1301. int err;
  1302. mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
  1303. MLXSW_REG_HTGT_INVALID_POLICER,
  1304. MLXSW_REG_HTGT_DEFAULT_PRIORITY,
  1305. MLXSW_REG_HTGT_DEFAULT_TC);
  1306. mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
  1307. MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
  1308. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
  1309. if (err)
  1310. return err;
  1311. mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
  1312. MLXSW_REG_HTGT_INVALID_POLICER,
  1313. MLXSW_REG_HTGT_DEFAULT_PRIORITY,
  1314. MLXSW_REG_HTGT_DEFAULT_TC);
  1315. mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
  1316. MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
  1317. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
  1318. if (err)
  1319. return err;
  1320. for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
  1321. err = mlxsw_core_trap_register(mlxsw_sx->core,
  1322. &mlxsw_sx_listener[i],
  1323. mlxsw_sx);
  1324. if (err)
  1325. goto err_listener_register;
  1326. }
  1327. return 0;
  1328. err_listener_register:
  1329. for (i--; i >= 0; i--) {
  1330. mlxsw_core_trap_unregister(mlxsw_sx->core,
  1331. &mlxsw_sx_listener[i],
  1332. mlxsw_sx);
  1333. }
  1334. return err;
  1335. }
static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
}
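
/* Flooding setup: build a single flooding table that contains only the CPU
 * port and bind the flooded packet types of 802.1Q bridges (unknown unicast,
 * broadcast and unregistered multicast) to it.
 */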
static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
{
	char sfgc_pl[MLXSW_REG_SFGC_LEN];
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	char *sftr_pl;
	int err;

	/* Configure a flooding table, which includes only CPU port. */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;
	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
			    MLXSW_PORT_CPU_PORT, true);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
	kfree(sftr_pl);
	if (err)
		return err;

	/* Flood different packet types using the flooding table. */
	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_BROADCAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sgcr_pack(sgcr_pl, true);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}
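
/* Configure the EMAD trap group on all SWIDs with its dedicated receive
 * descriptor queue, so Ethernet management datagrams (register access) can be
 * delivered to the driver.
 */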
static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
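
/* Driver init: query the switch HW ID, create all ports and then set up
 * packet traps and flood tables, unwinding in reverse order on any failure.
 */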
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
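
/* Teardown in reverse order of mlxsw_sx_init(). */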
static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
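
/* Resource profile handed to the firmware on initialization; only fields with
 * their used_* flag set are programmed. Two switch partitions are described,
 * one Ethernet and one InfiniBand, and the resource query is disabled.
 */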
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 6,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_IB,
		}
	},
	.resource_query_enable		= 0,
};
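
/* mlxsw core driver callbacks for the SwitchX-2 ASIC. */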
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= mlxsw_sx_driver_name,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
	.port_type_set		= mlxsw_sx_port_type_set,
};
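
/* PCI devices handled by this driver (SwitchX-2 only). */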
static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};

static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};
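
/* Register the mlxsw core driver first, then the PCI driver that probes
 * SwitchX-2 devices; unwind the core registration if PCI registration fails.
 */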
static int __init mlxsw_sx_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sx_driver);
	if (err)
		return err;

	err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
	return err;
}

static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}

module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);