bcm_sf2.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7
  1. /*
  2. * Broadcom Starfighter 2 DSA switch driver
  3. *
  4. * Copyright (C) 2014, Broadcom Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/list.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/of.h>
  17. #include <linux/phy.h>
  18. #include <linux/phy_fixed.h>
  19. #include <linux/mii.h>
  20. #include <linux/of.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_address.h>
  23. #include <linux/of_net.h>
  24. #include <linux/of_mdio.h>
  25. #include <net/dsa.h>
  26. #include <linux/ethtool.h>
  27. #include <linux/if_bridge.h>
  28. #include <linux/brcmphy.h>
  29. #include <linux/etherdevice.h>
  30. #include <net/switchdev.h>
  31. #include "bcm_sf2.h"
  32. #include "bcm_sf2_regs.h"
/* String, offset, and register size in bytes if different from 4 bytes */
/* Per-port MIB counter layout inside the CORE_P_MIB register window.
 * Entries with no explicit size are 32-bit counters; the three octet
 * counters are 64-bit and marked with an 8.  The strings are exposed
 * verbatim through ethtool -S, so the historical typos ("TxUnicastPKts",
 * "Ocets") must be preserved to keep the user-visible ABI stable.
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of MIB counters reported through ethtool */
#define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)
  91. static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
  92. int port, uint8_t *data)
  93. {
  94. unsigned int i;
  95. for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
  96. memcpy(data + i * ETH_GSTRING_LEN,
  97. bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
  98. }
  99. static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
  100. int port, uint64_t *data)
  101. {
  102. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  103. const struct bcm_sf2_hw_stats *s;
  104. unsigned int i;
  105. u64 val = 0;
  106. u32 offset;
  107. mutex_lock(&priv->stats_mutex);
  108. /* Now fetch the per-port counters */
  109. for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
  110. s = &bcm_sf2_mib[i];
  111. /* Do a latched 64-bit read if needed */
  112. offset = s->reg + CORE_P_MIB_OFFSET(port);
  113. if (s->sizeof_stat == 8)
  114. val = core_readq(priv, offset);
  115. else
  116. val = core_readl(priv, offset);
  117. data[i] = (u64)val;
  118. }
  119. mutex_unlock(&priv->stats_mutex);
  120. }
/* ethtool: number of statistics strings/values exposed per port */
static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
{
	return BCM_SF2_STATS_SIZE;
}
  125. static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
  126. {
  127. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  128. unsigned int i;
  129. u32 reg;
  130. /* Enable the IMP Port to be in the same VLAN as the other ports
  131. * on a per-port basis such that we only have Port i and IMP in
  132. * the same VLAN.
  133. */
  134. for (i = 0; i < priv->hw_params.num_ports; i++) {
  135. if (!((1 << i) & ds->enabled_port_mask))
  136. continue;
  137. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  138. reg |= (1 << cpu_port);
  139. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  140. }
  141. }
/* One-time bring-up of the IMP (CPU-facing) port: power its queue
 * memories, enable forwarding to/from it, and turn on Broadcom tags
 * in both directions so the CPU can address individual front ports.
 * The register writes below are ordered; do not reorder them.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories (clearing the bit powers them up) */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag; ports other than
	 * 8, 7 and 5 have no header-enable bit (val stays 0).
	 */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port (software override, link up) */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
  197. static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
  198. {
  199. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  200. u32 reg;
  201. reg = core_readl(priv, CORE_EEE_EN_CTRL);
  202. if (enable)
  203. reg |= 1 << port;
  204. else
  205. reg &= ~(1 << port);
  206. core_writel(priv, reg, CORE_EEE_EN_CTRL);
  207. }
/* Power the internal GPHY up or down.  The reset/power sequencing and
 * the udelay/mdelay settle times below are hardware-mandated; keep the
 * statement order exactly as-is.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		/* Assert reset while releasing the power-down controls,
		 * wait for the PHY to settle, then deassert reset; the
		 * final write below commits the deassertion.
		 */
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		/* Power down first, let it settle, then gate the 25 MHz
		 * clock with the final write below.
		 */
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
  234. static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
  235. int port)
  236. {
  237. unsigned int off;
  238. switch (port) {
  239. case 7:
  240. off = P7_IRQ_OFF;
  241. break;
  242. case 0:
  243. /* Port 0 interrupts are located on the first bank */
  244. intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
  245. return;
  246. default:
  247. off = P_IRQ_OFF(port);
  248. break;
  249. }
  250. intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
  251. }
  252. static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
  253. int port)
  254. {
  255. unsigned int off;
  256. switch (port) {
  257. case 7:
  258. off = P7_IRQ_OFF;
  259. break;
  260. case 0:
  261. /* Port 0 interrupts are located on the first bank */
  262. intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
  263. intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
  264. return;
  265. default:
  266. off = P_IRQ_OFF(port);
  267. break;
  268. }
  269. intrl2_1_mask_set(priv, P_IRQ_MASK(off));
  270. intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
  271. }
/* Per-port enable path (ndo_open): power the port memories, clear the
 * RX/TX disables, restore GPHY/EEE/VLAN state and (re)arm interrupts.
 * Returns 0 (the only return path).
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
  319. static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
  320. struct phy_device *phy)
  321. {
  322. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  323. u32 off, reg;
  324. if (priv->wol_ports_mask & (1 << port))
  325. return;
  326. if (port == priv->moca_port)
  327. bcm_sf2_port_intr_disable(priv, port);
  328. if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
  329. bcm_sf2_gphy_enable_set(ds, false);
  330. if (dsa_is_cpu_port(ds, port))
  331. off = CORE_IMP_CTL;
  332. else
  333. off = CORE_G_PCTL_PORT(port);
  334. reg = core_readl(priv, off);
  335. reg |= RX_DIS | TX_DIS;
  336. core_writel(priv, reg, off);
  337. /* Power down the port memory */
  338. reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
  339. reg |= P_TXQ_PSM_VDD(port);
  340. core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
  341. }
  342. /* Returns 0 if EEE was not enabled, or 1 otherwise
  343. */
  344. static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
  345. struct phy_device *phy)
  346. {
  347. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  348. struct ethtool_eee *p = &priv->port_sts[port].eee;
  349. int ret;
  350. p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
  351. ret = phy_init_eee(phy, 0);
  352. if (ret)
  353. return 0;
  354. bcm_sf2_eee_enable_set(ds, port, true);
  355. return 1;
  356. }
  357. static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
  358. struct ethtool_eee *e)
  359. {
  360. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  361. struct ethtool_eee *p = &priv->port_sts[port].eee;
  362. u32 reg;
  363. reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
  364. e->eee_enabled = p->eee_enabled;
  365. e->eee_active = !!(reg & (1 << port));
  366. return 0;
  367. }
  368. static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
  369. struct phy_device *phydev,
  370. struct ethtool_eee *e)
  371. {
  372. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  373. struct ethtool_eee *p = &priv->port_sts[port].eee;
  374. p->eee_enabled = e->eee_enabled;
  375. if (!p->eee_enabled) {
  376. bcm_sf2_eee_enable_set(ds, port, false);
  377. } else {
  378. p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
  379. if (!p->eee_enabled)
  380. return -EOPNOTSUPP;
  381. }
  382. return 0;
  383. }
  384. static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
  385. {
  386. unsigned int timeout = 1000;
  387. u32 reg;
  388. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  389. reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
  390. core_writel(priv, reg, CORE_FAST_AGE_CTRL);
  391. do {
  392. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  393. if (!(reg & FAST_AGE_STR_DONE))
  394. break;
  395. cpu_relax();
  396. } while (timeout--);
  397. if (!timeout)
  398. return -ETIMEDOUT;
  399. core_writel(priv, 0, CORE_FAST_AGE_CTRL);
  400. return 0;
  401. }
  402. /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
  403. * flush for that port.
  404. */
  405. static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
  406. {
  407. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  408. core_writel(priv, port, CORE_FAST_AGE_PORT);
  409. return bcm_sf2_fast_age_op(priv);
  410. }
  411. static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
  412. {
  413. core_writel(priv, vid, CORE_FAST_AGE_VID);
  414. return bcm_sf2_fast_age_op(priv);
  415. }
  416. static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
  417. {
  418. unsigned int timeout = 10;
  419. u32 reg;
  420. do {
  421. reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
  422. if (!(reg & ARLA_VTBL_STDN))
  423. return 0;
  424. usleep_range(1000, 2000);
  425. } while (timeout--);
  426. return -ETIMEDOUT;
  427. }
  428. static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
  429. {
  430. core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
  431. return bcm_sf2_vlan_op_wait(priv);
  432. }
  433. static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
  434. struct bcm_sf2_vlan *vlan)
  435. {
  436. int ret;
  437. core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
  438. core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
  439. CORE_ARLA_VTBL_ENTRY);
  440. ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
  441. if (ret)
  442. pr_err("failed to write VLAN entry\n");
  443. }
  444. static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
  445. struct bcm_sf2_vlan *vlan)
  446. {
  447. u32 entry;
  448. int ret;
  449. core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
  450. ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
  451. if (ret)
  452. return ret;
  453. entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
  454. vlan->members = entry & FWD_MAP_MASK;
  455. vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
  456. return 0;
  457. }
/* Bridge join: pull this port (and the CPU port, if set) out of the
 * "join all VLANs" mask, then cross-link the VLAN control membership
 * of every port already on the same bridge, keeping the cached
 * vlan_ctl_mask in sync with each register written.  Returns 0.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg &= ~BIT(port);
	if ((reg & BIT(cpu_port)) == BIT(cpu_port))
		reg &= ~BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

	priv->port_sts[port].bridge_dev = bridge;
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		/* Note: when i == port this also adds the port itself */
		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
  494. static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
  495. {
  496. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  497. struct net_device *bridge = priv->port_sts[port].bridge_dev;
  498. s8 cpu_port = ds->dst->cpu_port;
  499. unsigned int i;
  500. u32 reg, p_ctl;
  501. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  502. for (i = 0; i < priv->hw_params.num_ports; i++) {
  503. /* Don't touch the remaining ports */
  504. if (priv->port_sts[i].bridge_dev != bridge)
  505. continue;
  506. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  507. reg &= ~(1 << port);
  508. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  509. priv->port_sts[port].vlan_ctl_mask = reg;
  510. /* Prevent self removal to preserve isolation */
  511. if (port != i)
  512. p_ctl &= ~(1 << i);
  513. }
  514. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  515. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  516. priv->port_sts[port].bridge_dev = NULL;
  517. /* Make this port join all VLANs without VLAN entries */
  518. reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
  519. reg |= BIT(port);
  520. if (!(reg & BIT(cpu_port)))
  521. reg |= BIT(cpu_port);
  522. core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
  523. }
/* Translate a bridge STP state into the hardware MISTP state for this
 * port, fast-ageing the ARL first when the port stops forwarding.
 */
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
					u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	/* NOTE(review): cur_hw_state keeps the field at its in-register
	 * position while the G_MISTP_*_STATE constants below are compared
	 * against it unshifted -- this is only consistent if
	 * G_MISTP_STATE_SHIFT is 0; confirm against bcm_sf2_regs.h.
	 */
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			if (bcm_sf2_sw_fast_age_port(ds, port)) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return;
			}
		}
	}

	/* Commit the new state into the port control register */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
  570. /* Address Resolution Logic routines */
  571. static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
  572. {
  573. unsigned int timeout = 10;
  574. u32 reg;
  575. do {
  576. reg = core_readl(priv, CORE_ARLA_RWCTL);
  577. if (!(reg & ARL_STRTDN))
  578. return 0;
  579. usleep_range(1000, 2000);
  580. } while (timeout--);
  581. return -ETIMEDOUT;
  582. }
  583. static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
  584. {
  585. u32 cmd;
  586. if (op > ARL_RW)
  587. return -EINVAL;
  588. cmd = core_readl(priv, CORE_ARLA_RWCTL);
  589. cmd &= ~IVL_SVL_SELECT;
  590. cmd |= ARL_STRTDN;
  591. if (op)
  592. cmd |= ARL_RW;
  593. else
  594. cmd &= ~ARL_RW;
  595. core_writel(priv, cmd, CORE_ARLA_RWCTL);
  596. return bcm_sf2_arl_op_wait(priv);
  597. }
/* Scan the 4 ARL bins loaded by a previous read operation for @mac.
 * When is_valid is true, returns 0 with *idx and *ent filled for the
 * first valid bin; when false, returns 0 as soon as a bin overlaps the
 * MAC we just deleted.  Returns -ENOENT when no bin matches.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): this is a bitwise AND, not an equality
		 * test -- any bin sharing a single set bit with @mac
		 * matches; confirm this partial-overlap match is the
		 * intended behavior.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
/* Read-modify-write one ARL entry for (@addr, @vid) on @port.
 * op != 0: pure read, returns the lookup result.
 * op == 0: install (is_valid = true) or remove (is_valid = false) a
 * static entry, reusing the matched bin index or bin 0 for a fresh
 * entry, then re-reads to verify.  The register write order below
 * follows the hardware's access protocol; keep it intact.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Build the entry to write back into the selected bin */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
/* switchdev prepare phase for an FDB add: this hardware has nothing to
 * reserve or validate up front, so always report success.
 */
static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	/* We do not need to do anything specific here yet */
	return 0;
}
  672. static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
  673. const struct switchdev_obj_port_fdb *fdb,
  674. struct switchdev_trans *trans)
  675. {
  676. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  677. if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
  678. pr_err("%s: failed to add MAC address\n", __func__);
  679. }
  680. static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
  681. const struct switchdev_obj_port_fdb *fdb)
  682. {
  683. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  684. return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
  685. }
  686. static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
  687. {
  688. unsigned timeout = 1000;
  689. u32 reg;
  690. do {
  691. reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
  692. if (!(reg & ARLA_SRCH_STDN))
  693. return 0;
  694. if (reg & ARLA_SRCH_VLID)
  695. return 0;
  696. usleep_range(1000, 2000);
  697. } while (timeout--);
  698. return -ETIMEDOUT;
  699. }
  700. static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
  701. struct bcm_sf2_arl_entry *ent)
  702. {
  703. u64 mac_vid;
  704. u32 fwd_entry;
  705. mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
  706. fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
  707. bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
  708. }
  709. static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
  710. const struct bcm_sf2_arl_entry *ent,
  711. struct switchdev_obj_port_fdb *fdb,
  712. int (*cb)(struct switchdev_obj *obj))
  713. {
  714. if (!ent->is_valid)
  715. return 0;
  716. if (port != ent->port)
  717. return 0;
  718. ether_addr_copy(fdb->addr, ent->mac);
  719. fdb->vid = ent->vid;
  720. fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
  721. return cb(&fdb->obj);
  722. }
/* Walk the ARL via the hardware search engine and report every entry
 * for @port through @cb.  Each search step yields two result slots;
 * two consecutive invalid slots terminate the walk.  The loop is also
 * bounded by CORE_ARLA_NUM_ENTRIES as a safety net.
 */
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port].netdev;
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		/* Two invalid slots mean the search has wrapped: done */
		if (!results[0].is_valid && !results[1].is_valid)
			break;

	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
/* Indirect pseudo-PHY register access through the switch's MDIO master
 * interface. @op non-zero performs a read, zero performs a write of
 * @val to register @regnum of PHY @addr. Returns the 16-bit read value
 * for reads, 0 for writes.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	/* Take over the MDIO bus as master for the duration of the access */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset — select the target PHY address first */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset — then access the register itself */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Release MDIO master control */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
  776. static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
  777. {
  778. struct bcm_sf2_priv *priv = bus->priv;
  779. /* Intercept reads from Broadcom pseudo-PHY address, else, send
  780. * them to our master MDIO bus controller
  781. */
  782. if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
  783. return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
  784. else
  785. return mdiobus_read(priv->master_mii_bus, addr, regnum);
  786. }
  787. static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
  788. u16 val)
  789. {
  790. struct bcm_sf2_priv *priv = bus->priv;
  791. /* Intercept writes to the Broadcom pseudo-PHY address, else,
  792. * send them to our master MDIO bus controller
  793. */
  794. if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
  795. bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
  796. else
  797. mdiobus_write(priv->master_mii_bus, addr, regnum, val);
  798. return 0;
  799. }
  800. static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
  801. {
  802. struct bcm_sf2_priv *priv = dev_id;
  803. priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
  804. ~priv->irq0_mask;
  805. intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
  806. return IRQ_HANDLED;
  807. }
  808. static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
  809. {
  810. struct bcm_sf2_priv *priv = dev_id;
  811. priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
  812. ~priv->irq1_mask;
  813. intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
  814. if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
  815. priv->port_sts[7].link = 1;
  816. if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
  817. priv->port_sts[7].link = 0;
  818. return IRQ_HANDLED;
  819. }
  820. static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
  821. {
  822. unsigned int timeout = 1000;
  823. u32 reg;
  824. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  825. reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
  826. core_writel(priv, reg, CORE_WATCHDOG_CTRL);
  827. do {
  828. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  829. if (!(reg & SOFTWARE_RESET))
  830. break;
  831. usleep_range(1000, 2000);
  832. } while (timeout-- > 0);
  833. if (timeout == 0)
  834. return -ETIMEDOUT;
  835. return 0;
  836. }
/* Mask and acknowledge all interrupts on both INTRL2 controllers so no
 * stale or spurious events fire while the driver is not ready to
 * service them.
 */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	/* INTRL2_0: mask everything, clear pending, leave nothing unmasked */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	/* Same treatment for INTRL2_1 */
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
  846. static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
  847. struct device_node *dn)
  848. {
  849. struct device_node *port;
  850. const char *phy_mode_str;
  851. int mode;
  852. unsigned int port_num;
  853. int ret;
  854. priv->moca_port = -1;
  855. for_each_available_child_of_node(dn, port) {
  856. if (of_property_read_u32(port, "reg", &port_num))
  857. continue;
  858. /* Internal PHYs get assigned a specific 'phy-mode' property
  859. * value: "internal" to help flag them before MDIO probing
  860. * has completed, since they might be turned off at that
  861. * time
  862. */
  863. mode = of_get_phy_mode(port);
  864. if (mode < 0) {
  865. ret = of_property_read_string(port, "phy-mode",
  866. &phy_mode_str);
  867. if (ret < 0)
  868. continue;
  869. if (!strcasecmp(phy_mode_str, "internal"))
  870. priv->int_phy_mask |= 1 << port_num;
  871. }
  872. if (mode == PHY_INTERFACE_MODE_MOCA)
  873. priv->moca_port = port_num;
  874. }
  875. }
  876. static int bcm_sf2_mdio_register(struct dsa_switch *ds)
  877. {
  878. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  879. struct device_node *dn;
  880. static int index;
  881. int err;
  882. /* Find our integrated MDIO bus node */
  883. dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
  884. priv->master_mii_bus = of_mdio_find_bus(dn);
  885. if (!priv->master_mii_bus)
  886. return -EPROBE_DEFER;
  887. get_device(&priv->master_mii_bus->dev);
  888. priv->master_mii_dn = dn;
  889. priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
  890. if (!priv->slave_mii_bus)
  891. return -ENOMEM;
  892. priv->slave_mii_bus->priv = priv;
  893. priv->slave_mii_bus->name = "sf2 slave mii";
  894. priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
  895. priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
  896. snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
  897. index++);
  898. priv->slave_mii_bus->dev.of_node = dn;
  899. /* Include the pseudo-PHY address to divert reads towards our
  900. * workaround. This is only required for 7445D0, since 7445E0
  901. * disconnects the internal switch pseudo-PHY such that we can use the
  902. * regular SWITCH_MDIO master controller instead.
  903. *
  904. * Here we flag the pseudo PHY as needing special treatment and would
  905. * otherwise make all other PHY read/writes go to the master MDIO bus
  906. * controller that comes with this switch backed by the "mdio-unimac"
  907. * driver.
  908. */
  909. if (of_machine_is_compatible("brcm,bcm7445d0"))
  910. priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
  911. else
  912. priv->indir_phy_mask = 0;
  913. ds->phys_mii_mask = priv->indir_phy_mask;
  914. ds->slave_mii_bus = priv->slave_mii_bus;
  915. priv->slave_mii_bus->parent = ds->dev->parent;
  916. priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
  917. if (dn)
  918. err = of_mdiobus_register(priv->slave_mii_bus, dn);
  919. else
  920. err = mdiobus_register(priv->slave_mii_bus);
  921. if (err)
  922. of_node_put(dn);
  923. return err;
  924. }
  925. static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
  926. {
  927. mdiobus_unregister(priv->slave_mii_bus);
  928. if (priv->master_mii_dn)
  929. of_node_put(priv->master_mii_dn);
  930. }
/* DSA set_addr callback: this switch requires no MAC address
 * programming, so the request is accepted as a no-op.
 */
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}
/* Expose the integrated GPHY revision to the PHY driver as its flags. */
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */
	return priv->hw_params.gphy_rev;
}
/* Configure the per-port (RG)MII interface according to the PHY's
 * negotiated mode and force the resolved link parameters into the port
 * override register.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - both RGMII variants share EXT_GPHY mode */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	/* Enable pause frames based on what the PHY negotiated */
	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
/* Report fixed-link status for @port and mirror the resolved link
 * state into the port override register so traffic can flow.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Force the resolved link state through the override register */
	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* Asymmetric pause requires both the RX and TX pause bits set */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
  1059. static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
  1060. {
  1061. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1062. unsigned int port;
  1063. bcm_sf2_intr_disable(priv);
  1064. /* Disable all ports physically present including the IMP
  1065. * port, the other ones have already been disabled during
  1066. * bcm_sf2_sw_setup
  1067. */
  1068. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1069. if ((1 << port) & ds->enabled_port_mask ||
  1070. dsa_is_cpu_port(ds, port))
  1071. bcm_sf2_port_disable(ds, port, NULL);
  1072. }
  1073. return 0;
  1074. }
  1075. static int bcm_sf2_sw_resume(struct dsa_switch *ds)
  1076. {
  1077. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1078. unsigned int port;
  1079. int ret;
  1080. ret = bcm_sf2_sw_rst(priv);
  1081. if (ret) {
  1082. pr_err("%s: failed to software reset switch\n", __func__);
  1083. return ret;
  1084. }
  1085. if (priv->hw_params.num_gphy == 1)
  1086. bcm_sf2_gphy_enable_set(ds, true);
  1087. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1088. if ((1 << port) & ds->enabled_port_mask)
  1089. bcm_sf2_port_setup(ds, port, NULL);
  1090. else if (dsa_is_cpu_port(ds, port))
  1091. bcm_sf2_imp_setup(ds, port);
  1092. }
  1093. return 0;
  1094. }
  1095. static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
  1096. struct ethtool_wolinfo *wol)
  1097. {
  1098. struct net_device *p = ds->dst[ds->index].master_netdev;
  1099. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1100. struct ethtool_wolinfo pwol;
  1101. /* Get the parent device WoL settings */
  1102. p->ethtool_ops->get_wol(p, &pwol);
  1103. /* Advertise the parent device supported settings */
  1104. wol->supported = pwol.supported;
  1105. memset(&wol->sopass, 0, sizeof(wol->sopass));
  1106. if (pwol.wolopts & WAKE_MAGICSECURE)
  1107. memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
  1108. if (priv->wol_ports_mask & (1 << port))
  1109. wol->wolopts = pwol.wolopts;
  1110. else
  1111. wol->wolopts = 0;
  1112. }
  1113. static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
  1114. struct ethtool_wolinfo *wol)
  1115. {
  1116. struct net_device *p = ds->dst[ds->index].master_netdev;
  1117. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1118. s8 cpu_port = ds->dst[ds->index].cpu_port;
  1119. struct ethtool_wolinfo pwol;
  1120. p->ethtool_ops->get_wol(p, &pwol);
  1121. if (wol->wolopts & ~pwol.supported)
  1122. return -EINVAL;
  1123. if (wol->wolopts)
  1124. priv->wol_ports_mask |= (1 << port);
  1125. else
  1126. priv->wol_ports_mask &= ~(1 << port);
  1127. /* If we have at least one port enabled, make sure the CPU port
  1128. * is also enabled. If the CPU port is the last one enabled, we disable
  1129. * it since this configuration does not make sense.
  1130. */
  1131. if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
  1132. priv->wol_ports_mask |= (1 << cpu_port);
  1133. else
  1134. priv->wol_ports_mask &= ~(1 << cpu_port);
  1135. return p->ethtool_ops->set_wol(p, wol);
  1136. }
/* Globally enable or disable 802.1Q VLAN operation on the switch core. */
static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
{
	u32 mgmt, vc0, vc1, vc4, vc5;

	/* Read-modify-write all the VLAN control registers */
	mgmt = core_readl(priv, CORE_SWMODE);
	vc0 = core_readl(priv, CORE_VLAN_CTRL0);
	vc1 = core_readl(priv, CORE_VLAN_CTRL1);
	vc4 = core_readl(priv, CORE_VLAN_CTRL4);
	vc5 = core_readl(priv, CORE_VLAN_CTRL5);

	mgmt &= ~SW_FWDG_MODE;

	if (enable) {
		/* Enable VLANs with IVL learning, reserved multicast
		 * handling, and drop frames on ingress VID violations or
		 * VLAN table misses
		 */
		vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
		vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc4 |= INGR_VID_CHK_DROP;
		vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
	} else {
		/* Undo the above; forward VID violations to the IMP port
		 * instead of dropping them
		 */
		vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
		vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
		vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
	}

	core_writel(priv, vc0, CORE_VLAN_CTRL0);
	core_writel(priv, vc1, CORE_VLAN_CTRL1);
	core_writel(priv, 0, CORE_VLAN_CTRL3);
	core_writel(priv, vc4, CORE_VLAN_CTRL4);
	core_writel(priv, vc5, CORE_VLAN_CTRL5);
	core_writel(priv, mgmt, CORE_SWMODE);
}
  1166. static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
  1167. {
  1168. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1169. unsigned int port;
  1170. /* Clear all VLANs */
  1171. bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
  1172. for (port = 0; port < priv->hw_params.num_ports; port++) {
  1173. if (!((1 << port) & ds->enabled_port_mask))
  1174. continue;
  1175. core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
  1176. }
  1177. }
/* Per-port VLAN filtering toggle: intentionally a no-op here; the
 * global VLAN configuration is handled by bcm_sf2_enable_vlan().
 */
static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	return 0;
}
/* switchdev prepare phase for VLAN additions: turn on global VLAN
 * support so the commit phase (bcm_sf2_sw_vlan_add) can program entries.
 */
static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	bcm_sf2_enable_vlan(priv, true);

	return 0;
}
  1191. static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
  1192. const struct switchdev_obj_port_vlan *vlan,
  1193. struct switchdev_trans *trans)
  1194. {
  1195. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1196. bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  1197. bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
  1198. s8 cpu_port = ds->dst->cpu_port;
  1199. struct bcm_sf2_vlan *vl;
  1200. u16 vid;
  1201. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  1202. vl = &priv->vlans[vid];
  1203. bcm_sf2_get_vlan_entry(priv, vid, vl);
  1204. vl->members |= BIT(port) | BIT(cpu_port);
  1205. if (untagged)
  1206. vl->untag |= BIT(port) | BIT(cpu_port);
  1207. else
  1208. vl->untag &= ~(BIT(port) | BIT(cpu_port));
  1209. bcm_sf2_set_vlan_entry(priv, vid, vl);
  1210. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1211. }
  1212. if (pvid) {
  1213. core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
  1214. core_writel(priv, vlan->vid_end,
  1215. CORE_DEFAULT_1Q_TAG_P(cpu_port));
  1216. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1217. }
  1218. }
  1219. static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
  1220. const struct switchdev_obj_port_vlan *vlan)
  1221. {
  1222. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1223. bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  1224. s8 cpu_port = ds->dst->cpu_port;
  1225. struct bcm_sf2_vlan *vl;
  1226. u16 vid, pvid;
  1227. int ret;
  1228. pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
  1229. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  1230. vl = &priv->vlans[vid];
  1231. ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
  1232. if (ret)
  1233. return ret;
  1234. vl->members &= ~BIT(port);
  1235. if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
  1236. vl->members = 0;
  1237. if (pvid == vid)
  1238. pvid = 0;
  1239. if (untagged) {
  1240. vl->untag &= ~BIT(port);
  1241. if ((vl->untag & BIT(port)) == BIT(cpu_port))
  1242. vl->untag = 0;
  1243. }
  1244. bcm_sf2_set_vlan_entry(priv, vid, vl);
  1245. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1246. }
  1247. core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
  1248. core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
  1249. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1250. return 0;
  1251. }
  1252. static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
  1253. struct switchdev_obj_port_vlan *vlan,
  1254. int (*cb)(struct switchdev_obj *obj))
  1255. {
  1256. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1257. struct bcm_sf2_port_status *p = &priv->port_sts[port];
  1258. struct bcm_sf2_vlan *vl;
  1259. u16 vid, pvid;
  1260. int err = 0;
  1261. pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
  1262. for (vid = 0; vid < VLAN_N_VID; vid++) {
  1263. vl = &priv->vlans[vid];
  1264. if (!(vl->members & BIT(port)))
  1265. continue;
  1266. vlan->vid_begin = vlan->vid_end = vid;
  1267. vlan->flags = 0;
  1268. if (vl->untag & BIT(port))
  1269. vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  1270. if (p->pvid == vid)
  1271. vlan->flags |= BRIDGE_VLAN_INFO_PVID;
  1272. err = cb(&vlan->obj);
  1273. if (err)
  1274. break;
  1275. }
  1276. return err;
  1277. }
  1278. static int bcm_sf2_sw_setup(struct dsa_switch *ds)
  1279. {
  1280. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1281. unsigned int port;
  1282. /* Enable all valid ports and disable those unused */
  1283. for (port = 0; port < priv->hw_params.num_ports; port++) {
  1284. /* IMP port receives special treatment */
  1285. if ((1 << port) & ds->enabled_port_mask)
  1286. bcm_sf2_port_setup(ds, port, NULL);
  1287. else if (dsa_is_cpu_port(ds, port))
  1288. bcm_sf2_imp_setup(ds, port);
  1289. else
  1290. bcm_sf2_port_disable(ds, port, NULL);
  1291. }
  1292. bcm_sf2_sw_configure_vlan(ds);
  1293. return 0;
  1294. }
/* DSA switch driver operations for the Starfighter 2 switch core */
static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol = DSA_TAG_PROTO_BRCM,
	.setup = bcm_sf2_sw_setup,
	.set_addr = bcm_sf2_sw_set_addr,
	.get_phy_flags = bcm_sf2_sw_get_phy_flags,
	.get_strings = bcm_sf2_sw_get_strings,
	.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count = bcm_sf2_sw_get_sset_count,
	.adjust_link = bcm_sf2_sw_adjust_link,
	.fixed_link_update = bcm_sf2_sw_fixed_link_update,
	.suspend = bcm_sf2_sw_suspend,
	.resume = bcm_sf2_sw_resume,
	.get_wol = bcm_sf2_sw_get_wol,
	.set_wol = bcm_sf2_sw_set_wol,
	.port_enable = bcm_sf2_port_setup,
	.port_disable = bcm_sf2_port_disable,
	.get_eee = bcm_sf2_sw_get_eee,
	.set_eee = bcm_sf2_sw_set_eee,
	.port_bridge_join = bcm_sf2_sw_br_join,
	.port_bridge_leave = bcm_sf2_sw_br_leave,
	.port_stp_state_set = bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
	.port_fdb_add = bcm_sf2_sw_fdb_add,
	.port_fdb_del = bcm_sf2_sw_fdb_del,
	.port_fdb_dump = bcm_sf2_sw_fdb_dump,
	.port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
	.port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
	.port_vlan_add = bcm_sf2_sw_vlan_add,
	.port_vlan_del = bcm_sf2_sw_vlan_del,
	.port_vlan_dump = bcm_sf2_sw_vlan_dump,
};
/* Platform driver probe: map register resources, reset the switch core,
 * register the MDIO buses and interrupt handlers, read out hardware
 * parameters, and finally register with the DSA core.
 */
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	struct bcm_sf2_priv *priv;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	/* Private data lives immediately behind the dsa_switch structure */
	ds = devm_kzalloc(&pdev->dev, sizeof(*ds) + sizeof(*priv), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	priv = (struct bcm_sf2_priv *)(ds + 1);
	ds->priv = priv;
	ds->dev = &pdev->dev;
	ds->drv = &bcm_sf2_switch_driver;

	dev_set_drvdata(&pdev->dev, ds);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* NOTE(review): passes dn->child, i.e. assumes the port sub-nodes
	 * hang off the switch node's first child container - confirm
	 * against the device tree binding.
	 */
	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map each register resource into the consecutive __iomem pointers
	 * starting at priv->core (layout matches BCM_SF2_REGS_NAME order).
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Decode top/core/GPHY revisions for reporting and PHY quirks */
	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = dsa_register_switch(ds, dn);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}
/* Platform driver remove: quiesce the hardware first, then detach from
 * the DSA core and tear down the MDIO buses (order matters here).
 */
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct dsa_switch *ds = platform_get_drvdata(pdev);
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(ds);
	dsa_unregister_switch(ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}
  1427. #ifdef CONFIG_PM_SLEEP
  1428. static int bcm_sf2_suspend(struct device *dev)
  1429. {
  1430. struct platform_device *pdev = to_platform_device(dev);
  1431. struct dsa_switch *ds = platform_get_drvdata(pdev);
  1432. return dsa_switch_suspend(ds);
  1433. }
  1434. static int bcm_sf2_resume(struct device *dev)
  1435. {
  1436. struct platform_device *pdev = to_platform_device(dev);
  1437. struct dsa_switch *ds = platform_get_drvdata(pdev);
  1438. return dsa_switch_resume(ds);
  1439. }
  1440. #endif /* CONFIG_PM_SLEEP */
  1441. static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
  1442. bcm_sf2_suspend, bcm_sf2_resume);
  1443. static const struct of_device_id bcm_sf2_of_match[] = {
  1444. { .compatible = "brcm,bcm7445-switch-v4.0" },
  1445. { /* sentinel */ },
  1446. };
/* Platform driver glue: binds via the OF match table above */
static struct platform_driver bcm_sf2_driver = {
	.probe = bcm_sf2_sw_probe,
	.remove = bcm_sf2_sw_remove,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);
/* Module metadata */
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");