bcm_sf2.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744
  1. /*
  2. * Broadcom Starfighter 2 DSA switch driver
  3. *
  4. * Copyright (C) 2014, Broadcom Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/list.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/of.h>
  17. #include <linux/phy.h>
  18. #include <linux/phy_fixed.h>
  19. #include <linux/mii.h>
  20. #include <linux/of.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_address.h>
  23. #include <linux/of_net.h>
  24. #include <linux/of_mdio.h>
  25. #include <net/dsa.h>
  26. #include <linux/ethtool.h>
  27. #include <linux/if_bridge.h>
  28. #include <linux/brcmphy.h>
  29. #include <linux/etherdevice.h>
  30. #include <net/switchdev.h>
  31. #include "bcm_sf2.h"
  32. #include "bcm_sf2_regs.h"
/* String, offset, and register size in bytes if different from 4 bytes.
 *
 * NOTE: the strings are exported verbatim through "ethtool -S" and are
 * therefore user-visible ABI; the historical misspellings ("PKts",
 * "Ocets") must be preserved so existing scripts keep working.
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of counters exported per port through ethtool */
#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
  91. static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
  92. int port, uint8_t *data)
  93. {
  94. unsigned int i;
  95. for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
  96. memcpy(data + i * ETH_GSTRING_LEN,
  97. bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
  98. }
/* Read all per-port MIB counters into @data, in the same order as the
 * strings reported by bcm_sf2_sw_get_strings().  stats_mutex serializes
 * concurrent ethtool readers against the latched 64-bit reads.
 */
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
					 int port, uint64_t *data)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	const struct bcm_sf2_hw_stats *s;
	unsigned int i;
	u64 val = 0;
	u32 offset;

	mutex_lock(&priv->stats_mutex);

	/* Now fetch the per-port counters */
	for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
		s = &bcm_sf2_mib[i];

		/* Do a latched 64-bit read if needed */
		offset = s->reg + CORE_P_MIB_OFFSET(port);
		if (s->sizeof_stat == 8)
			val = core_readq(priv, offset);
		else
			val = core_readl(priv, offset);

		data[i] = (u64)val;
	}

	mutex_unlock(&priv->stats_mutex);
}
  121. static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
  122. {
  123. return BCM_SF2_STATS_SIZE;
  124. }
  125. static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
  126. struct device *host_dev, int sw_addr,
  127. void **_priv)
  128. {
  129. struct bcm_sf2_priv *priv;
  130. priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
  131. if (!priv)
  132. return NULL;
  133. *_priv = priv;
  134. return "Broadcom Starfighter 2";
  135. }
  136. static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
  137. {
  138. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  139. unsigned int i;
  140. u32 reg;
  141. /* Enable the IMP Port to be in the same VLAN as the other ports
  142. * on a per-port basis such that we only have Port i and IMP in
  143. * the same VLAN.
  144. */
  145. for (i = 0; i < priv->hw_params.num_ports; i++) {
  146. if (!((1 << i) & ds->enabled_port_mask))
  147. continue;
  148. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  149. reg |= (1 << cpu_port);
  150. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  151. }
  152. }
/* Bring up the IMP (CPU-facing) port: power its queue memories, allow
 * BC/MC/UC forwarding towards it, enable switching, and turn on Broadcom
 * tags in both directions so the DSA tagger can (de)multiplex frames.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		/* No Broadcom tag enable bit for other ports */
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
  208. static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
  209. {
  210. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  211. u32 reg;
  212. reg = core_readl(priv, CORE_EEE_EN_CTRL);
  213. if (enable)
  214. reg |= 1 << port;
  215. else
  216. reg &= ~(1 << port);
  217. core_writel(priv, reg, CORE_EEE_EN_CTRL);
  218. }
/* Power the integrated GPHY up or down.  Note the trailing reg_writel()
 * is shared by both branches: it releases PHY_RESET on power-up and
 * latches CK25_DIS on power-down.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		/* Assert reset while removing power-down/IDDQ/clock gating */
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		/* Reset hold time -- presumably per GPHY datasheet; confirm */
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		/* Power down first, then gate the 25MHz reference clock */
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
/* Unmask the interrupt sources for @port.  Port 0 lives in the first
 * INTRL2 bank; port 7 has a dedicated offset; all other ports use the
 * generic per-port offset in the second bank.
 */
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
/* Mask the interrupt sources for @port and clear any pending status so
 * no stale interrupt fires when the port is re-enabled.  Bank selection
 * mirrors bcm_sf2_port_intr_enable().
 */
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
/* (Re)enable a user port: power up its queue memories, clear RX/TX
 * disable, bring the internal GPHY back where applicable, and restore
 * cached VLAN membership and EEE state.  Always returns 0.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	/* vlan_ctl_mask caches any bridge membership set up earlier */
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
/* Quiesce a port: mask its interrupts, power down the GPHY where
 * applicable, set RX/TX disable and power down the port memories.
 * Ports armed for Wake-on-LAN are left untouched so they can still
 * receive wake frames.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The IMP port is controlled through a different register */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
  353. /* Returns 0 if EEE was not enabled, or 1 otherwise
  354. */
  355. static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
  356. struct phy_device *phy)
  357. {
  358. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  359. struct ethtool_eee *p = &priv->port_sts[port].eee;
  360. int ret;
  361. p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
  362. ret = phy_init_eee(phy, 0);
  363. if (ret)
  364. return 0;
  365. bcm_sf2_eee_enable_set(ds, port, true);
  366. return 1;
  367. }
  368. static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
  369. struct ethtool_eee *e)
  370. {
  371. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  372. struct ethtool_eee *p = &priv->port_sts[port].eee;
  373. u32 reg;
  374. reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
  375. e->eee_enabled = p->eee_enabled;
  376. e->eee_active = !!(reg & (1 << port));
  377. return 0;
  378. }
  379. static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
  380. struct phy_device *phydev,
  381. struct ethtool_eee *e)
  382. {
  383. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  384. struct ethtool_eee *p = &priv->port_sts[port].eee;
  385. p->eee_enabled = e->eee_enabled;
  386. if (!p->eee_enabled) {
  387. bcm_sf2_eee_enable_set(ds, port, false);
  388. } else {
  389. p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
  390. if (!p->eee_enabled)
  391. return -EOPNOTSUPP;
  392. }
  393. return 0;
  394. }
  395. static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
  396. {
  397. unsigned int timeout = 1000;
  398. u32 reg;
  399. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  400. reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
  401. core_writel(priv, reg, CORE_FAST_AGE_CTRL);
  402. do {
  403. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  404. if (!(reg & FAST_AGE_STR_DONE))
  405. break;
  406. cpu_relax();
  407. } while (timeout--);
  408. if (!timeout)
  409. return -ETIMEDOUT;
  410. core_writel(priv, 0, CORE_FAST_AGE_CTRL);
  411. return 0;
  412. }
  413. /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
  414. * flush for that port.
  415. */
  416. static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
  417. {
  418. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  419. core_writel(priv, port, CORE_FAST_AGE_PORT);
  420. return bcm_sf2_fast_age_op(priv);
  421. }
  422. static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
  423. {
  424. core_writel(priv, vid, CORE_FAST_AGE_VID);
  425. return bcm_sf2_fast_age_op(priv);
  426. }
/* Wait for a pending VLAN table operation to finish.
 * Returns 0 once ARLA_VTBL_STDN self-clears, -ETIMEDOUT otherwise.
 */
static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 10;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
		if (!(reg & ARLA_VTBL_STDN))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}
  439. static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
  440. {
  441. core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
  442. return bcm_sf2_vlan_op_wait(priv);
  443. }
  444. static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
  445. struct bcm_sf2_vlan *vlan)
  446. {
  447. int ret;
  448. core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
  449. core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
  450. CORE_ARLA_VTBL_ENTRY);
  451. ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
  452. if (ret)
  453. pr_err("failed to write VLAN entry\n");
  454. }
/* Read the VLAN table entry for @vid into @vlan.
 * Returns 0 on success or the error from the table operation.
 */
static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
				  struct bcm_sf2_vlan *vlan)
{
	u32 entry;
	int ret;

	core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);

	ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
	if (ret)
		return ret;

	/* Entry layout: untagged map in the high bits, member map low */
	entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
	vlan->members = entry & FWD_MAP_MASK;
	vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;

	return 0;
}
/* Add @port to @bridge: take it out of the "join all VLANs" group and
 * splice it into the VLAN control membership of every port already
 * belonging to the same bridge.  Always returns 0.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg &= ~BIT(port);
	if ((reg & BIT(cpu_port)) == BIT(cpu_port))
		reg &= ~BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

	priv->port_sts[port].bridge_dev = bridge;
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
  505. static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
  506. {
  507. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  508. struct net_device *bridge = priv->port_sts[port].bridge_dev;
  509. s8 cpu_port = ds->dst->cpu_port;
  510. unsigned int i;
  511. u32 reg, p_ctl;
  512. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  513. for (i = 0; i < priv->hw_params.num_ports; i++) {
  514. /* Don't touch the remaining ports */
  515. if (priv->port_sts[i].bridge_dev != bridge)
  516. continue;
  517. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  518. reg &= ~(1 << port);
  519. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  520. priv->port_sts[port].vlan_ctl_mask = reg;
  521. /* Prevent self removal to preserve isolation */
  522. if (port != i)
  523. p_ctl &= ~(1 << i);
  524. }
  525. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  526. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  527. priv->port_sts[port].bridge_dev = NULL;
  528. /* Make this port join all VLANs without VLAN entries */
  529. reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
  530. reg |= BIT(port);
  531. if (!(reg & BIT(cpu_port)))
  532. reg |= BIT(cpu_port);
  533. core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
  534. }
/* Translate a bridge STP state into the hardware MISTP state for @port
 * and program it, fast-ageing learned ARL entries when the port leaves
 * a Learning/Forwarding state.
 */
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
					u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			if (bcm_sf2_sw_fast_age_port(ds, port)) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return;
			}
		}
	}

	/* Re-read: bcm_sf2_sw_fast_age_port() may have taken a while */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
/* Address Resolution Logic routines */

/* Wait for a pending ARL read/write operation to complete.
 * Returns 0 once ARL_STRTDN self-clears, -ETIMEDOUT otherwise.
 */
static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 10;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_RWCTL);
		if (!(reg & ARL_STRTDN))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}
  594. static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
  595. {
  596. u32 cmd;
  597. if (op > ARL_RW)
  598. return -EINVAL;
  599. cmd = core_readl(priv, CORE_ARLA_RWCTL);
  600. cmd &= ~IVL_SVL_SELECT;
  601. cmd |= ARL_STRTDN;
  602. if (op)
  603. cmd |= ARL_RW;
  604. else
  605. cmd &= ~ARL_RW;
  606. core_writel(priv, cmd, CORE_ARLA_RWCTL);
  607. return bcm_sf2_arl_op_wait(priv);
  608. }
/* Scan the 4 ARL bins latched by the previous read command.
 *
 * @is_valid == true: look for a valid entry; its bin index is returned
 * through @idx.
 * @is_valid == false: we just invalidated an entry; any bin overlapping
 * @mac confirms it was processed.
 *   NOTE(review): "(mac_vid & mac)" is a bitwise-overlap test, not an
 *   equality comparison -- it can match a different MAC sharing set
 *   bits; confirm this is the intended semantic.
 *
 * Returns 0 on a match, -ENOENT if no bin matched, or the error from
 * waiting on the pending ARL operation.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
/* Read or update the ARL entry for @addr/@vid.
 *
 * @op != 0: lookup only -- returns 0 if the entry was found.
 * @op == 0: (re)program the entry as static on @port; @is_valid == false
 * removes it.  The entry is re-read afterwards as a sanity check.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Stage the entry to (in)validate into bin @idx */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	/* Issue the write back to hardware */
	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
  676. static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
  677. const struct switchdev_obj_port_fdb *fdb,
  678. struct switchdev_trans *trans)
  679. {
  680. /* We do not need to do anything specific here yet */
  681. return 0;
  682. }
  683. static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
  684. const struct switchdev_obj_port_fdb *fdb,
  685. struct switchdev_trans *trans)
  686. {
  687. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  688. if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
  689. pr_err("%s: failed to add MAC address\n", __func__);
  690. }
  691. static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
  692. const struct switchdev_obj_port_fdb *fdb)
  693. {
  694. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  695. return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
  696. }
  697. static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
  698. {
  699. unsigned timeout = 1000;
  700. u32 reg;
  701. do {
  702. reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
  703. if (!(reg & ARLA_SRCH_STDN))
  704. return 0;
  705. if (reg & ARLA_SRCH_VLID)
  706. return 0;
  707. usleep_range(1000, 2000);
  708. } while (timeout--);
  709. return -ETIMEDOUT;
  710. }
  711. static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
  712. struct bcm_sf2_arl_entry *ent)
  713. {
  714. u64 mac_vid;
  715. u32 fwd_entry;
  716. mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
  717. fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
  718. bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
  719. }
  720. static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
  721. const struct bcm_sf2_arl_entry *ent,
  722. struct switchdev_obj_port_fdb *fdb,
  723. int (*cb)(struct switchdev_obj *obj))
  724. {
  725. if (!ent->is_valid)
  726. return 0;
  727. if (port != ent->port)
  728. return 0;
  729. ether_addr_copy(fdb->addr, ent->mac);
  730. fdb->vid = ent->vid;
  731. fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
  732. return cb(&fdb->obj);
  733. }
/* Walk the ARL via the hardware search engine and report every entry
 * belonging to @port through @cb.  The engine hands back results two at
 * a time; we stop when both slots come back invalid, or after scanning
 * CORE_ARLA_NUM_ENTRIES result pairs.
 */
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port].netdev;
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		if (!results[0].is_valid && !results[1].is_valid)
			break;

	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
/* Indirect pseudo-PHY register access through the switch's MDIO master.
 * @op: non-zero = read, zero = write @val.  The MDIO master is selected for
 * the duration of the access and deselected afterwards.  Returns the 16-bit
 * read value for reads; for writes ret stays 0, so 0 is returned.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	/* Route register accesses through the integrated MDIO master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the bus back to the regular (external) MDIO path */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
  787. static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
  788. {
  789. struct bcm_sf2_priv *priv = bus->priv;
  790. /* Intercept reads from Broadcom pseudo-PHY address, else, send
  791. * them to our master MDIO bus controller
  792. */
  793. if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
  794. return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
  795. else
  796. return mdiobus_read(priv->master_mii_bus, addr, regnum);
  797. }
  798. static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
  799. u16 val)
  800. {
  801. struct bcm_sf2_priv *priv = bus->priv;
  802. /* Intercept writes to the Broadcom pseudo-PHY address, else,
  803. * send them to our master MDIO bus controller
  804. */
  805. if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
  806. bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
  807. else
  808. mdiobus_write(priv->master_mii_bus, addr, regnum, val);
  809. return 0;
  810. }
/* INTRL2_0 interrupt handler: latch the unmasked pending bits into
 * irq0_stat and acknowledge them.  No per-bit handling is done here.
 */
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
/* INTRL2_1 interrupt handler: acknowledge pending bits and track link
 * up/down events for port 7 (the MoCA port), whose link state is only
 * reported through these interrupts (see bcm_sf2_sw_fixed_link_update()).
 */
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}
  831. static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
  832. {
  833. unsigned int timeout = 1000;
  834. u32 reg;
  835. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  836. reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
  837. core_writel(priv, reg, CORE_WATCHDOG_CTRL);
  838. do {
  839. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  840. if (!(reg & SOFTWARE_RESET))
  841. break;
  842. usleep_range(1000, 2000);
  843. } while (timeout-- > 0);
  844. if (timeout == 0)
  845. return -ETIMEDOUT;
  846. return 0;
  847. }
/* Mask and acknowledge all interrupts on both INTRL2 controllers.  The
 * final MASK_CLEAR writes of 0 are no-ops that leave everything masked.
 */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
/* Walk the port child nodes of @dn and record which ports carry internal
 * PHYs (int_phy_mask) and which port, if any, is the MoCA port (moca_port,
 * -1 when absent).
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			/* "internal" is not a standard phy-mode value, so it
			 * must be matched by raw string here
			 */
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;
	}
}
/* Locate the integrated "brcm,unimac-mdio" master bus and register a slave
 * MII bus in front of it so pseudo-PHY accesses can be intercepted (see
 * bcm_sf2_sw_mdio_read/write).  Returns 0 or a negative errno; -EPROBE_DEFER
 * while the master bus has not been registered yet.
 */
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		/* NOTE(review): the reference of_find_compatible_node() took
		 * on dn does not appear to be dropped on this return (nor on
		 * the -ENOMEM one below) - confirm whether of_node_put(dn)
		 * is needed on these early-error paths.
		 */
		return -EPROBE_DEFER;

	/* Pin the master bus device; released via master_mii_dn /
	 * mdiobus_unregister in bcm_sf2_mdio_unregister()
	 */
	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	/* static index makes each registered instance's bus id unique */
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}
  936. static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
  937. {
  938. mdiobus_unregister(priv->slave_mii_bus);
  939. if (priv->master_mii_dn)
  940. of_node_put(priv->master_mii_dn);
  941. }
/* dsa set_addr hook: the switch needs no device MAC address programmed, so
 * this intentionally does nothing and reports success.
 */
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}
/* dsa get_phy_flags hook: expose the integrated GPHY revision (captured
 * from REG_PHY_REVISION in bcm_sf2_sw_setup()) to the PHY driver.
 */
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */
	return priv->hw_params.gphy_rev;
}
/* dsa adjust_link hook: program the per-port RGMII block according to the
 * PHY interface mode and negotiated pause, then force the link parameters
 * (speed/duplex/link) into the port override register.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - shares the EXT_GPHY programming below */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
/* dsa fixed_link_update hook: report fixed-PHY status for @port and mirror
 * the resulting link state into the port override register.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* NOTE(review): asym_pause is only set when both RX and TX pause
	 * bits are active, while pause alone is set for RX-only - confirm
	 * this matches the CORE_PAUSESTS register semantics.
	 */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
/* dsa suspend hook: mask all interrupts and power down every enabled port
 * (including the CPU/IMP port).  Always returns 0.
 */
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present including the IMP
	 * port, the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask ||
		    dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}
/* dsa resume hook: software-reset the switch, re-enable the GPHY when a
 * single one is present, and re-run the per-port setup that suspend undid.
 */
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	/* User-facing ports get a regular setup, the CPU port gets the IMP
	 * treatment; others remain disabled.
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	return 0;
}
/* ethtool get_wol: Wake-on-LAN is implemented by the master network device,
 * so advertise its capabilities and report this port's options only if the
 * port is flagged in wol_ports_mask.
 */
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}
/* ethtool set_wol: validate the requested options against the master
 * device's capabilities, track per-port WoL enablement in wol_ports_mask,
 * keep the CPU port in sync, and delegate the actual programming to the
 * master device.
 */
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	/* Reject options the parent device cannot support */
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}
/* Globally enable or disable 802.1Q VLAN operation by programming the five
 * VLAN control registers and clearing the managed-forwarding mode bit.
 */
static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
{
	u32 mgmt, vc0, vc1, vc4, vc5;

	mgmt = core_readl(priv, CORE_SWMODE);
	vc0 = core_readl(priv, CORE_VLAN_CTRL0);
	vc1 = core_readl(priv, CORE_VLAN_CTRL1);
	vc4 = core_readl(priv, CORE_VLAN_CTRL4);
	vc5 = core_readl(priv, CORE_VLAN_CTRL5);

	mgmt &= ~SW_FWDG_MODE;

	if (enable) {
		/* 802.1Q with independent VLAN learning; drop frames that
		 * fail the ingress VID check or miss the VLAN table
		 */
		vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
		vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc4 |= INGR_VID_CHK_DROP;
		vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
	} else {
		vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
		vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
		vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
	}

	core_writel(priv, vc0, CORE_VLAN_CTRL0);
	core_writel(priv, vc1, CORE_VLAN_CTRL1);
	core_writel(priv, 0, CORE_VLAN_CTRL3);
	core_writel(priv, vc4, CORE_VLAN_CTRL4);
	core_writel(priv, vc5, CORE_VLAN_CTRL5);
	core_writel(priv, mgmt, CORE_SWMODE);
}
/* Reset VLAN state: clear the hardware VLAN table and give every enabled
 * user port a default 802.1Q tag of VID 1.
 */
static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;

	/* Clear all VLANs */
	bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);

	for (port = 0; port < priv->hw_params.num_ports; port++) {
		if (!((1 << port) & ds->enabled_port_mask))
			continue;

		core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
	}
}
/* dsa port_vlan_filtering hook: any filtering setting is accepted without
 * touching the hardware (global VLAN enable happens in the prepare phase of
 * port_vlan_add instead).
 */
static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	return 0;
}
/* dsa port_vlan_prepare hook (switchdev prepare phase): globally enable
 * VLAN operation so the subsequent commit can program table entries.
 * Never fails.
 */
static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	bcm_sf2_enable_vlan(priv, true);

	return 0;
}
  1202. static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
  1203. const struct switchdev_obj_port_vlan *vlan,
  1204. struct switchdev_trans *trans)
  1205. {
  1206. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1207. bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  1208. bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
  1209. s8 cpu_port = ds->dst->cpu_port;
  1210. struct bcm_sf2_vlan *vl;
  1211. u16 vid;
  1212. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  1213. vl = &priv->vlans[vid];
  1214. bcm_sf2_get_vlan_entry(priv, vid, vl);
  1215. vl->members |= BIT(port) | BIT(cpu_port);
  1216. if (untagged)
  1217. vl->untag |= BIT(port) | BIT(cpu_port);
  1218. else
  1219. vl->untag &= ~(BIT(port) | BIT(cpu_port));
  1220. bcm_sf2_set_vlan_entry(priv, vid, vl);
  1221. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1222. }
  1223. if (pvid) {
  1224. core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
  1225. core_writel(priv, vlan->vid_end,
  1226. CORE_DEFAULT_1Q_TAG_P(cpu_port));
  1227. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1228. }
  1229. }
  1230. static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
  1231. const struct switchdev_obj_port_vlan *vlan)
  1232. {
  1233. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1234. bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  1235. s8 cpu_port = ds->dst->cpu_port;
  1236. struct bcm_sf2_vlan *vl;
  1237. u16 vid, pvid;
  1238. int ret;
  1239. pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
  1240. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  1241. vl = &priv->vlans[vid];
  1242. ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
  1243. if (ret)
  1244. return ret;
  1245. vl->members &= ~BIT(port);
  1246. if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
  1247. vl->members = 0;
  1248. if (pvid == vid)
  1249. pvid = 0;
  1250. if (untagged) {
  1251. vl->untag &= ~BIT(port);
  1252. if ((vl->untag & BIT(port)) == BIT(cpu_port))
  1253. vl->untag = 0;
  1254. }
  1255. bcm_sf2_set_vlan_entry(priv, vid, vl);
  1256. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1257. }
  1258. core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
  1259. core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
  1260. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1261. return 0;
  1262. }
/* dsa port_vlan_dump hook: iterate the software VLAN table and report every
 * VID this port is a member of through @cb, flagging untagged and PVID
 * entries.  Stops at the first callback error.
 */
static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
				struct switchdev_obj_port_vlan *vlan,
				int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct bcm_sf2_port_status *p = &priv->port_sts[port];
	struct bcm_sf2_vlan *vl;
	u16 vid, pvid;
	int err = 0;

	/* NOTE(review): the local "pvid" is read from hardware but never
	 * used - the PVID flag below compares against the cached p->pvid
	 * instead.  Confirm which of the two should be authoritative.
	 */
	pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));

	for (vid = 0; vid < VLAN_N_VID; vid++) {
		vl = &priv->vlans[vid];

		if (!(vl->members & BIT(port)))
			continue;

		vlan->vid_begin = vlan->vid_end = vid;
		vlan->flags = 0;

		if (vl->untag & BIT(port))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		if (p->pvid == vid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;

		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
/* One-time switch bring-up: map register resources, software-reset the
 * switch, register the MDIO buses, hook both interrupt lines, reset MIB
 * counters, configure every port and the VLAN defaults, and capture the
 * hardware revision.  Unwinds via goto labels on any failure.
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->cd->of_node->parent;
	bcm_sf2_identify_ports(priv, ds->cd->of_node);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map the register resources in order; &priv->core is treated as the
	 * first of BCM_SF2_REGS_NUM consecutive __iomem pointers in
	 * struct bcm_sf2_priv
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	bcm_sf2_sw_configure_vlan(ds);

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_mdio:
	bcm_sf2_mdio_unregister(priv);
out_unmap:
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}
/* DSA driver operations for the Starfighter 2 switch, using the Broadcom
 * tagging protocol.
 */
static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol		= DSA_TAG_PROTO_BRCM,
	.probe			= bcm_sf2_sw_drv_probe,
	.setup			= bcm_sf2_sw_setup,
	.set_addr		= bcm_sf2_sw_set_addr,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_eee		= bcm_sf2_sw_get_eee,
	.set_eee		= bcm_sf2_sw_set_eee,
	.port_bridge_join	= bcm_sf2_sw_br_join,
	.port_bridge_leave	= bcm_sf2_sw_br_leave,
	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
	.port_fdb_add		= bcm_sf2_sw_fdb_add,
	.port_fdb_del		= bcm_sf2_sw_fdb_del,
	.port_fdb_dump		= bcm_sf2_sw_fdb_dump,
	.port_vlan_filtering	= bcm_sf2_sw_vlan_filtering,
	.port_vlan_prepare	= bcm_sf2_sw_vlan_prepare,
	.port_vlan_add		= bcm_sf2_sw_vlan_add,
	.port_vlan_del		= bcm_sf2_sw_vlan_del,
	.port_vlan_dump		= bcm_sf2_sw_vlan_dump,
};
/* Module entry point: register the driver with the DSA core */
static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);
/* Module exit point: unregister the driver from the DSA core */
static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);
  1434. MODULE_AUTHOR("Broadcom Corporation");
  1435. MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
  1436. MODULE_LICENSE("GPL");
  1437. MODULE_ALIAS("platform:brcm-sf2");