bcm_sf2.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414
  1. /*
  2. * Broadcom Starfighter 2 DSA switch driver
  3. *
  4. * Copyright (C) 2014, Broadcom Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/list.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/of.h>
  17. #include <linux/phy.h>
  18. #include <linux/phy_fixed.h>
  19. #include <linux/mii.h>
  20. #include <linux/of.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_address.h>
  23. #include <linux/of_net.h>
  24. #include <net/dsa.h>
  25. #include <linux/ethtool.h>
  26. #include <linux/if_bridge.h>
  27. #include <linux/brcmphy.h>
  28. #include <linux/etherdevice.h>
  29. #include <net/switchdev.h>
  30. #include "bcm_sf2.h"
  31. #include "bcm_sf2_regs.h"
/* String, offset, and register size in bytes if different from 4 bytes */
/* Per-port MIB counter layout: each entry names one hardware counter and
 * gives its offset within a port's MIB block.  Entries with a third field
 * of 8 are 64-bit counters read with core_readq(); all others are 32-bit.
 * Note: the names are exposed verbatim through ethtool, so existing
 * spellings (e.g. "Ocets") must not be changed.
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of entries in the MIB table above */
#define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)
  90. static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
  91. int port, uint8_t *data)
  92. {
  93. unsigned int i;
  94. for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
  95. memcpy(data + i * ETH_GSTRING_LEN,
  96. bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
  97. }
  98. static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
  99. int port, uint64_t *data)
  100. {
  101. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  102. const struct bcm_sf2_hw_stats *s;
  103. unsigned int i;
  104. u64 val = 0;
  105. u32 offset;
  106. mutex_lock(&priv->stats_mutex);
  107. /* Now fetch the per-port counters */
  108. for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
  109. s = &bcm_sf2_mib[i];
  110. /* Do a latched 64-bit read if needed */
  111. offset = s->reg + CORE_P_MIB_OFFSET(port);
  112. if (s->sizeof_stat == 8)
  113. val = core_readq(priv, offset);
  114. else
  115. val = core_readl(priv, offset);
  116. data[i] = (u64)val;
  117. }
  118. mutex_unlock(&priv->stats_mutex);
  119. }
/* ethtool: number of statistics (and strings) exposed per port */
static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
{
	return BCM_SF2_STATS_SIZE;
}
  124. static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
  125. {
  126. return "Broadcom Starfighter 2";
  127. }
  128. static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
  129. {
  130. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  131. unsigned int i;
  132. u32 reg;
  133. /* Enable the IMP Port to be in the same VLAN as the other ports
  134. * on a per-port basis such that we only have Port i and IMP in
  135. * the same VLAN.
  136. */
  137. for (i = 0; i < priv->hw_params.num_ports; i++) {
  138. if (!((1 << i) & ds->phys_port_mask))
  139. continue;
  140. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  141. reg |= (1 << cpu_port);
  142. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  143. }
  144. }
/* Bring up the IMP (CPU-facing) port @port: power its queue memories,
 * enable forwarding of all traffic classes to it, turn on Broadcom tags
 * in both directions and force its link status up.  The register write
 * order follows the hardware bring-up sequence and should be preserved.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag; only ports 5, 7
	 * and 8 have a per-port header-enable bit, any other port gets
	 * no tag enable.
	 */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
  200. static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
  201. {
  202. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  203. u32 reg;
  204. reg = core_readl(priv, CORE_EEE_EN_CTRL);
  205. if (enable)
  206. reg |= 1 << port;
  207. else
  208. reg &= ~(1 << port);
  209. core_writel(priv, reg, CORE_EEE_EN_CTRL);
  210. }
/* Power the internal GPHY up or down.  The reset/power sequencing and
 * the udelay/mdelay settle times are hardware requirements; do not
 * reorder the register writes.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		/* Assert reset while removing power-down/bias/clock-gate,
		 * let the PHY settle, then deassert reset (written by the
		 * final reg_writel below).
		 */
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		/* Power down and hold in reset, then gate the 25MHz clock
		 * only after the power-down has taken effect.
		 */
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	/* Commit the final control value for either path */
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
  237. static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
  238. int port)
  239. {
  240. unsigned int off;
  241. switch (port) {
  242. case 7:
  243. off = P7_IRQ_OFF;
  244. break;
  245. case 0:
  246. /* Port 0 interrupts are located on the first bank */
  247. intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
  248. return;
  249. default:
  250. off = P_IRQ_OFF(port);
  251. break;
  252. }
  253. intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
  254. }
  255. static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
  256. int port)
  257. {
  258. unsigned int off;
  259. switch (port) {
  260. case 7:
  261. off = P7_IRQ_OFF;
  262. break;
  263. case 0:
  264. /* Port 0 interrupts are located on the first bank */
  265. intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
  266. intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
  267. return;
  268. default:
  269. off = P_IRQ_OFF(port);
  270. break;
  271. }
  272. intrl2_1_mask_set(priv, P_IRQ_MASK(off));
  273. intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
  274. }
/* DSA port_enable hook: power up @port, re-apply its PHY workarounds,
 * restore its port-based VLAN membership (including any bridge
 * membership recorded in vlan_ctl_mask) and re-enable EEE if it was on
 * before the port went down.  Always returns 0.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	/* Keep the IMP port in this port's VLAN after the rewrite above */
	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
  322. static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
  323. struct phy_device *phy)
  324. {
  325. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  326. u32 off, reg;
  327. if (priv->wol_ports_mask & (1 << port))
  328. return;
  329. if (port == priv->moca_port)
  330. bcm_sf2_port_intr_disable(priv, port);
  331. if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
  332. bcm_sf2_gphy_enable_set(ds, false);
  333. if (dsa_is_cpu_port(ds, port))
  334. off = CORE_IMP_CTL;
  335. else
  336. off = CORE_G_PCTL_PORT(port);
  337. reg = core_readl(priv, off);
  338. reg |= RX_DIS | TX_DIS;
  339. core_writel(priv, reg, off);
  340. /* Power down the port memory */
  341. reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
  342. reg |= P_TXQ_PSM_VDD(port);
  343. core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
  344. }
  345. /* Returns 0 if EEE was not enabled, or 1 otherwise
  346. */
  347. static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
  348. struct phy_device *phy)
  349. {
  350. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  351. struct ethtool_eee *p = &priv->port_sts[port].eee;
  352. int ret;
  353. p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
  354. ret = phy_init_eee(phy, 0);
  355. if (ret)
  356. return 0;
  357. bcm_sf2_eee_enable_set(ds, port, true);
  358. return 1;
  359. }
  360. static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
  361. struct ethtool_eee *e)
  362. {
  363. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  364. struct ethtool_eee *p = &priv->port_sts[port].eee;
  365. u32 reg;
  366. reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
  367. e->eee_enabled = p->eee_enabled;
  368. e->eee_active = !!(reg & (1 << port));
  369. return 0;
  370. }
  371. static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
  372. struct phy_device *phydev,
  373. struct ethtool_eee *e)
  374. {
  375. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  376. struct ethtool_eee *p = &priv->port_sts[port].eee;
  377. p->eee_enabled = e->eee_enabled;
  378. if (!p->eee_enabled) {
  379. bcm_sf2_eee_enable_set(ds, port, false);
  380. } else {
  381. p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
  382. if (!p->eee_enabled)
  383. return -EOPNOTSUPP;
  384. }
  385. return 0;
  386. }
  387. /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
  388. * flush for that port.
  389. */
  390. static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
  391. {
  392. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  393. unsigned int timeout = 1000;
  394. u32 reg;
  395. core_writel(priv, port, CORE_FAST_AGE_PORT);
  396. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  397. reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
  398. core_writel(priv, reg, CORE_FAST_AGE_CTRL);
  399. do {
  400. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  401. if (!(reg & FAST_AGE_STR_DONE))
  402. break;
  403. cpu_relax();
  404. } while (timeout--);
  405. if (!timeout)
  406. return -ETIMEDOUT;
  407. core_writel(priv, 0, CORE_FAST_AGE_CTRL);
  408. return 0;
  409. }
  410. static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
  411. struct net_device *bridge)
  412. {
  413. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  414. unsigned int i;
  415. u32 reg, p_ctl;
  416. priv->port_sts[port].bridge_dev = bridge;
  417. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  418. for (i = 0; i < priv->hw_params.num_ports; i++) {
  419. if (priv->port_sts[i].bridge_dev != bridge)
  420. continue;
  421. /* Add this local port to the remote port VLAN control
  422. * membership and update the remote port bitmask
  423. */
  424. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  425. reg |= 1 << port;
  426. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  427. priv->port_sts[i].vlan_ctl_mask = reg;
  428. p_ctl |= 1 << i;
  429. }
  430. /* Configure the local port VLAN control membership to include
  431. * remote ports and update the local port bitmask
  432. */
  433. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  434. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  435. return 0;
  436. }
  437. static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
  438. {
  439. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  440. struct net_device *bridge = priv->port_sts[port].bridge_dev;
  441. unsigned int i;
  442. u32 reg, p_ctl;
  443. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  444. for (i = 0; i < priv->hw_params.num_ports; i++) {
  445. /* Don't touch the remaining ports */
  446. if (priv->port_sts[i].bridge_dev != bridge)
  447. continue;
  448. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  449. reg &= ~(1 << port);
  450. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  451. priv->port_sts[port].vlan_ctl_mask = reg;
  452. /* Prevent self removal to preserve isolation */
  453. if (port != i)
  454. p_ctl &= ~(1 << i);
  455. }
  456. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  457. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  458. priv->port_sts[port].bridge_dev = NULL;
  459. }
/* Map a bridge STP state onto the hardware per-port state field and,
 * when leaving Learning/Forwarding for a non-forwarding state, flush
 * the port's learned ARL entries first.
 * Returns 0 on success, -EINVAL for an unknown state, or the
 * fast-age error code.
 */
static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
					u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	int ret = 0;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	/* NOTE(review): cur_hw_state keeps the field at its in-register
	 * (shifted) position while the G_MISTP_*_STATE constants below
	 * look unshifted — the >=/<= comparisons further down assume both
	 * are in the same encoding; confirm against bcm_sf2_regs.h.
	 */
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return -EINVAL;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			ret = bcm_sf2_sw_fast_age_port(ds, port);
			if (ret) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return ret;
			}
		}
	}

	/* Replace the state field, preserving the rest of the register */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));

	return 0;
}
  509. /* Address Resolution Logic routines */
  510. static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
  511. {
  512. unsigned int timeout = 10;
  513. u32 reg;
  514. do {
  515. reg = core_readl(priv, CORE_ARLA_RWCTL);
  516. if (!(reg & ARL_STRTDN))
  517. return 0;
  518. usleep_range(1000, 2000);
  519. } while (timeout--);
  520. return -ETIMEDOUT;
  521. }
  522. static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
  523. {
  524. u32 cmd;
  525. if (op > ARL_RW)
  526. return -EINVAL;
  527. cmd = core_readl(priv, CORE_ARLA_RWCTL);
  528. cmd &= ~IVL_SVL_SELECT;
  529. cmd |= ARL_STRTDN;
  530. if (op)
  531. cmd |= ARL_RW;
  532. else
  533. cmd &= ~ARL_RW;
  534. core_writel(priv, cmd, CORE_ARLA_RWCTL);
  535. return bcm_sf2_arl_op_wait(priv);
  536. }
/* Scan the 4 ARL result bins after a read operation.  When looking for
 * a valid entry (@is_valid true), return 0 with *@idx and *@ent set to
 * the first valid bin; when checking a deletion (@is_valid false),
 * return 0 as soon as the bin's MAC overlaps @mac.  -ENOENT if no bin
 * matched.  Caller must have issued the ARL read beforehand.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	/* Make sure the previous ARL operation has finished */
	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): bitwise AND matches on *any* shared bits,
		 * not an exact MAC compare — presumably intentional for the
		 * just-deleted case, but confirm it cannot false-positive.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
/* Perform an ARL operation for (@addr, @vid) on behalf of @port.
 * @op: non-zero means read-only (just locate the entry); zero means
 * write — install (@is_valid true) or remove (@is_valid false) a
 * static entry, then read it back to verify.
 * Returns 0 on success or a negative error code.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	/* Locate the bin holding this MAC/VID (sets idx on success) */
	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Build the entry to write into the selected bin */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	/* Commit the write */
	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
/* switchdev prepare phase for FDB additions: nothing to pre-allocate
 * or validate for this hardware, so always accept.
 */
static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	/* We do not need to do anything specific here yet */
	return 0;
}
/* switchdev commit phase: install a static ARL entry for @fdb on @port
 * (op=0 write, is_valid=true).
 */
static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_fdb *fdb,
			      struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
}
/* Remove the static ARL entry for @fdb on @port by writing it back
 * with is_valid=false.
 */
static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_fdb *fdb)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
}
  624. static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
  625. {
  626. unsigned timeout = 1000;
  627. u32 reg;
  628. do {
  629. reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
  630. if (!(reg & ARLA_SRCH_STDN))
  631. return 0;
  632. if (reg & ARLA_SRCH_VLID)
  633. return 0;
  634. usleep_range(1000, 2000);
  635. } while (timeout--);
  636. return -ETIMEDOUT;
  637. }
  638. static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
  639. struct bcm_sf2_arl_entry *ent)
  640. {
  641. u64 mac_vid;
  642. u32 fwd_entry;
  643. mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
  644. fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
  645. bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
  646. }
  647. static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
  648. const struct bcm_sf2_arl_entry *ent,
  649. struct switchdev_obj_port_fdb *fdb,
  650. int (*cb)(struct switchdev_obj *obj))
  651. {
  652. if (!ent->is_valid)
  653. return 0;
  654. if (port != ent->port)
  655. return 0;
  656. ether_addr_copy(fdb->addr, ent->mac);
  657. fdb->vid = ent->vid;
  658. fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
  659. return cb(&fdb->obj);
  660. }
  661. static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
  662. struct switchdev_obj_port_fdb *fdb,
  663. int (*cb)(struct switchdev_obj *obj))
  664. {
  665. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  666. struct net_device *dev = ds->ports[port];
  667. struct bcm_sf2_arl_entry results[2];
  668. unsigned int count = 0;
  669. int ret;
  670. /* Start search operation */
  671. core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
  672. do {
  673. ret = bcm_sf2_arl_search_wait(priv);
  674. if (ret)
  675. return ret;
  676. /* Read both entries, then return their values back */
  677. bcm_sf2_arl_search_rd(priv, 0, &results[0]);
  678. ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
  679. if (ret)
  680. return ret;
  681. bcm_sf2_arl_search_rd(priv, 1, &results[1]);
  682. ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
  683. if (ret)
  684. return ret;
  685. if (!results[0].is_valid && !results[1].is_valid)
  686. break;
  687. } while (count++ < CORE_ARLA_NUM_ENTRIES);
  688. return 0;
  689. }
  690. static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
  691. {
  692. struct bcm_sf2_priv *priv = dev_id;
  693. priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
  694. ~priv->irq0_mask;
  695. intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
  696. return IRQ_HANDLED;
  697. }
  698. static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
  699. {
  700. struct bcm_sf2_priv *priv = dev_id;
  701. priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
  702. ~priv->irq1_mask;
  703. intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
  704. if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
  705. priv->port_sts[7].link = 1;
  706. if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
  707. priv->port_sts[7].link = 0;
  708. return IRQ_HANDLED;
  709. }
  710. static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
  711. {
  712. unsigned int timeout = 1000;
  713. u32 reg;
  714. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  715. reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
  716. core_writel(priv, reg, CORE_WATCHDOG_CTRL);
  717. do {
  718. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  719. if (!(reg & SOFTWARE_RESET))
  720. break;
  721. usleep_range(1000, 2000);
  722. } while (timeout-- > 0);
  723. if (timeout == 0)
  724. return -ETIMEDOUT;
  725. return 0;
  726. }
  727. static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
  728. {
  729. intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
  730. intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
  731. intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
  732. intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
  733. intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
  734. intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
  735. }
  736. static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
  737. struct device_node *dn)
  738. {
  739. struct device_node *port;
  740. const char *phy_mode_str;
  741. int mode;
  742. unsigned int port_num;
  743. int ret;
  744. priv->moca_port = -1;
  745. for_each_available_child_of_node(dn, port) {
  746. if (of_property_read_u32(port, "reg", &port_num))
  747. continue;
  748. /* Internal PHYs get assigned a specific 'phy-mode' property
  749. * value: "internal" to help flag them before MDIO probing
  750. * has completed, since they might be turned off at that
  751. * time
  752. */
  753. mode = of_get_phy_mode(port);
  754. if (mode < 0) {
  755. ret = of_property_read_string(port, "phy-mode",
  756. &phy_mode_str);
  757. if (ret < 0)
  758. continue;
  759. if (!strcasecmp(phy_mode_str, "internal"))
  760. priv->int_phy_mask |= 1 << port_num;
  761. }
  762. if (mode == PHY_INTERFACE_MODE_MOCA)
  763. priv->moca_port = port_num;
  764. }
  765. }
/* One-time switch bring-up: map the register ranges, software-reset the
 * hardware, hook up both INTRL2 interrupts, reset MIB counters, and put
 * every port into a known enabled/disabled state.
 *
 * Returns 0 on success or a negative errno; on failure every register
 * range mapped so far is unmapped again and any requested IRQ is freed.
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->pd->of_node->parent;
	bcm_sf2_identify_ports(priv, ds->pd->of_node);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map all register ranges in reg_names order.  'base' walks a run
	 * of __iomem pointers starting at priv->core.
	 * NOTE(review): this relies on those pointer members being laid
	 * out contiguously in struct bcm_sf2_priv - confirm against the
	 * header.
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_unmap;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters: pulse RST_MIB_CNT high, then low */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	/* Include the pseudo-PHY address and the broadcast PHY address to
	 * divert reads towards our workaround. This is only required for
	 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
	 * that we can use the regular SWITCH_MDIO master controller instead.
	 *
	 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
	 * to have a 1:1 mapping between Port address and PHY address in order
	 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
	 * not what we want here, so we initialize phys_mii_mask 0 to always
	 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
	else
		ds->phys_mii_mask = 0;

	/* Cache the hardware revision identifiers for later reporting */
	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_unmap:
	/* Unmap only the ranges that were successfully mapped above */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}
/* .set_addr callback: this switch does not need a MAC address to be
 * programmed into it, so this is intentionally a no-op reporting success.
 */
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}
  879. static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
  880. {
  881. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  882. /* The BCM7xxx PHY driver expects to find the integrated PHY revision
  883. * in bits 15:8 and the patch level in bits 7:0 which is exactly what
  884. * the REG_PHY_REVISION register layout is.
  885. */
  886. return priv->hw_params.gphy_rev;
  887. }
/* Perform an indirect pseudo-PHY register access through the switch's
 * internal MDIO master.
 *
 * @op:     non-zero for a read, zero for a write
 * @addr:   PHY address on the bus
 * @regnum: PHY register number
 * @val:    value to write (ignored for reads)
 *
 * Returns the 16-bit register value for reads; for writes, returns 0.
 */
static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
			       int regnum, u16 val)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	int ret = 0;
	u32 reg;

	/* Steer MDIO transactions to the switch-internal master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	/* NOTE(review): 0x70 looks like the PHY-address page register of
	 * the MDIO master block; confirm against the SF2 register map.
	 */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;
	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the MDIO bus back to the external master controller */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* MDIO data is 16 bits wide; mask off any upper read bits */
	return ret & 0xffff;
}
  913. static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
  914. {
  915. /* Intercept reads from the MDIO broadcast address or Broadcom
  916. * pseudo-PHY address
  917. */
  918. switch (addr) {
  919. case 0:
  920. case BRCM_PSEUDO_PHY_ADDR:
  921. return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
  922. default:
  923. return 0xffff;
  924. }
  925. }
  926. static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
  927. u16 val)
  928. {
  929. /* Intercept writes to the MDIO broadcast address or Broadcom
  930. * pseudo-PHY address
  931. */
  932. switch (addr) {
  933. case 0:
  934. case BRCM_PSEUDO_PHY_ADDR:
  935. bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
  936. break;
  937. }
  938. return 0;
  939. }
/* Apply PHY link parameters to @port: program the RGMII block for
 * external interface modes, then force the port override register with
 * the speed/duplex/link state reported by @phydev.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - no-delay RGMII shares the EXT_GPHY setup */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	/* Reflect the negotiated pause capabilities into the RGMII block */
	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	/* other speeds leave the speed field at 0 (10 Mbit/s encoding) */
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
/* fixed_link_update callback: report link/duplex/pause status for a
 * fixed-link port and mirror the resulting link state into the port's
 * override register so traffic can flow.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port]);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Propagate the decided link state into the port override register */
	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* RX pause in the low bits, TX pause shifted by
	 * PAUSESTS_TX_PAUSE_SHIFT; both together means symmetric+asymmetric
	 */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
  1055. static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
  1056. {
  1057. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1058. unsigned int port;
  1059. bcm_sf2_intr_disable(priv);
  1060. /* Disable all ports physically present including the IMP
  1061. * port, the other ones have already been disabled during
  1062. * bcm_sf2_sw_setup
  1063. */
  1064. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1065. if ((1 << port) & ds->phys_port_mask ||
  1066. dsa_is_cpu_port(ds, port))
  1067. bcm_sf2_port_disable(ds, port, NULL);
  1068. }
  1069. return 0;
  1070. }
  1071. static int bcm_sf2_sw_resume(struct dsa_switch *ds)
  1072. {
  1073. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1074. unsigned int port;
  1075. int ret;
  1076. ret = bcm_sf2_sw_rst(priv);
  1077. if (ret) {
  1078. pr_err("%s: failed to software reset switch\n", __func__);
  1079. return ret;
  1080. }
  1081. if (priv->hw_params.num_gphy == 1)
  1082. bcm_sf2_gphy_enable_set(ds, true);
  1083. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1084. if ((1 << port) & ds->phys_port_mask)
  1085. bcm_sf2_port_setup(ds, port, NULL);
  1086. else if (dsa_is_cpu_port(ds, port))
  1087. bcm_sf2_imp_setup(ds, port);
  1088. }
  1089. return 0;
  1090. }
  1091. static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
  1092. struct ethtool_wolinfo *wol)
  1093. {
  1094. struct net_device *p = ds->dst[ds->index].master_netdev;
  1095. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1096. struct ethtool_wolinfo pwol;
  1097. /* Get the parent device WoL settings */
  1098. p->ethtool_ops->get_wol(p, &pwol);
  1099. /* Advertise the parent device supported settings */
  1100. wol->supported = pwol.supported;
  1101. memset(&wol->sopass, 0, sizeof(wol->sopass));
  1102. if (pwol.wolopts & WAKE_MAGICSECURE)
  1103. memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
  1104. if (priv->wol_ports_mask & (1 << port))
  1105. wol->wolopts = pwol.wolopts;
  1106. else
  1107. wol->wolopts = 0;
  1108. }
  1109. static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
  1110. struct ethtool_wolinfo *wol)
  1111. {
  1112. struct net_device *p = ds->dst[ds->index].master_netdev;
  1113. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1114. s8 cpu_port = ds->dst[ds->index].cpu_port;
  1115. struct ethtool_wolinfo pwol;
  1116. p->ethtool_ops->get_wol(p, &pwol);
  1117. if (wol->wolopts & ~pwol.supported)
  1118. return -EINVAL;
  1119. if (wol->wolopts)
  1120. priv->wol_ports_mask |= (1 << port);
  1121. else
  1122. priv->wol_ports_mask &= ~(1 << port);
  1123. /* If we have at least one port enabled, make sure the CPU port
  1124. * is also enabled. If the CPU port is the last one enabled, we disable
  1125. * it since this configuration does not make sense.
  1126. */
  1127. if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
  1128. priv->wol_ports_mask |= (1 << cpu_port);
  1129. else
  1130. priv->wol_ports_mask &= ~(1 << cpu_port);
  1131. return p->ethtool_ops->set_wol(p, wol);
  1132. }
/* DSA switch driver operations for the Starfighter 2, using Broadcom
 * tags towards the CPU port.
 */
static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol = DSA_TAG_PROTO_BRCM,
	.priv_size = sizeof(struct bcm_sf2_priv),
	/* Probe and one-time setup */
	.probe = bcm_sf2_sw_probe,
	.setup = bcm_sf2_sw_setup,
	.set_addr = bcm_sf2_sw_set_addr,
	/* PHY access and link management */
	.get_phy_flags = bcm_sf2_sw_get_phy_flags,
	.phy_read = bcm_sf2_sw_phy_read,
	.phy_write = bcm_sf2_sw_phy_write,
	/* ethtool statistics */
	.get_strings = bcm_sf2_sw_get_strings,
	.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count = bcm_sf2_sw_get_sset_count,
	.adjust_link = bcm_sf2_sw_adjust_link,
	.fixed_link_update = bcm_sf2_sw_fixed_link_update,
	/* Power management and Wake-on-LAN */
	.suspend = bcm_sf2_sw_suspend,
	.resume = bcm_sf2_sw_resume,
	.get_wol = bcm_sf2_sw_get_wol,
	.set_wol = bcm_sf2_sw_set_wol,
	/* Per-port enable/disable and EEE */
	.port_enable = bcm_sf2_port_setup,
	.port_disable = bcm_sf2_port_disable,
	.get_eee = bcm_sf2_sw_get_eee,
	.set_eee = bcm_sf2_sw_set_eee,
	/* Bridge, STP and FDB offload */
	.port_bridge_join = bcm_sf2_sw_br_join,
	.port_bridge_leave = bcm_sf2_sw_br_leave,
	.port_stp_update = bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
	.port_fdb_add = bcm_sf2_sw_fdb_add,
	.port_fdb_del = bcm_sf2_sw_fdb_del,
	.port_fdb_dump = bcm_sf2_sw_fdb_dump,
};
/* Module entry point: register the switch driver with the DSA core */
static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);
/* Module exit point: unregister the switch driver from the DSA core */
static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);
/* Module metadata */
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");