bcm_sf2.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782
  1. /*
  2. * Broadcom Starfighter 2 DSA switch driver
  3. *
  4. * Copyright (C) 2014, Broadcom Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/list.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/of.h>
  17. #include <linux/phy.h>
  18. #include <linux/phy_fixed.h>
  19. #include <linux/mii.h>
  20. #include <linux/of.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_address.h>
  23. #include <linux/of_net.h>
  24. #include <linux/of_mdio.h>
  25. #include <net/dsa.h>
  26. #include <linux/ethtool.h>
  27. #include <linux/if_bridge.h>
  28. #include <linux/brcmphy.h>
  29. #include <linux/etherdevice.h>
  30. #include <net/switchdev.h>
  31. #include "bcm_sf2.h"
  32. #include "bcm_sf2_regs.h"
/* String, offset, and register size in bytes if different from 4 bytes.
 *
 * Per-port MIB counter layout: each entry names one hardware counter and
 * gives its byte offset within the port's MIB block (added to
 * CORE_P_MIB_OFFSET(port) at read time).  A third field of 8 marks the
 * wide 64-bit counters that must be read with core_readq(); entries
 * without it default to a 4-byte core_readl().  The strings are exposed
 * verbatim through ethtool -S, so the historical misspellings
 * ("TxUnicastPKts", "...Ocets") are ABI and must not be corrected.
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of exported ethtool statistics per port */
#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
  91. static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
  92. int port, uint8_t *data)
  93. {
  94. unsigned int i;
  95. for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
  96. memcpy(data + i * ETH_GSTRING_LEN,
  97. bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
  98. }
  99. static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
  100. int port, uint64_t *data)
  101. {
  102. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  103. const struct bcm_sf2_hw_stats *s;
  104. unsigned int i;
  105. u64 val = 0;
  106. u32 offset;
  107. mutex_lock(&priv->stats_mutex);
  108. /* Now fetch the per-port counters */
  109. for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
  110. s = &bcm_sf2_mib[i];
  111. /* Do a latched 64-bit read if needed */
  112. offset = s->reg + CORE_P_MIB_OFFSET(port);
  113. if (s->sizeof_stat == 8)
  114. val = core_readq(priv, offset);
  115. else
  116. val = core_readl(priv, offset);
  117. data[i] = (u64)val;
  118. }
  119. mutex_unlock(&priv->stats_mutex);
  120. }
  121. static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
  122. {
  123. return BCM_SF2_STATS_SIZE;
  124. }
  125. static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds)
  126. {
  127. return DSA_TAG_PROTO_BRCM;
  128. }
  129. static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
  130. {
  131. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  132. unsigned int i;
  133. u32 reg;
  134. /* Enable the IMP Port to be in the same VLAN as the other ports
  135. * on a per-port basis such that we only have Port i and IMP in
  136. * the same VLAN.
  137. */
  138. for (i = 0; i < priv->hw_params.num_ports; i++) {
  139. if (!((1 << i) & ds->enabled_port_mask))
  140. continue;
  141. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  142. reg |= (1 << cpu_port);
  143. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  144. }
  145. }
/* One-time bring-up of the IMP (CPU-facing) port: power up its memories,
 * enable forwarding of all traffic classes to it, turn on Broadcom tag
 * insertion/extraction for it, and force its link state up.  The order
 * of register writes below follows the hardware bring-up sequence.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag; only ports 5, 7 and
	 * 8 have a Broadcom-tag enable bit, val = 0 makes the OR below a
	 * no-op for any other port.
	 */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
  201. static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
  202. {
  203. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  204. u32 reg;
  205. reg = core_readl(priv, CORE_EEE_EN_CTRL);
  206. if (enable)
  207. reg |= 1 << port;
  208. else
  209. reg &= ~(1 << port);
  210. core_writel(priv, reg, CORE_EEE_EN_CTRL);
  211. }
/* Power the internal GPHY up or down.
 *
 * Enable path: release power-down/IDDQ/clock-gate with PHY_RESET held,
 * wait 21us for the analog blocks to settle, then the shared final
 * write below releases PHY_RESET.
 *
 * Disable path: assert power-down, IDDQ and reset, wait 1ms, then the
 * shared final write additionally gates the 25MHz reference clock
 * (CK25_DIS), and LED control is handed back to the PHY.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	/* Commit the final value computed in whichever branch ran above */
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
  238. static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
  239. int port)
  240. {
  241. unsigned int off;
  242. switch (port) {
  243. case 7:
  244. off = P7_IRQ_OFF;
  245. break;
  246. case 0:
  247. /* Port 0 interrupts are located on the first bank */
  248. intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
  249. return;
  250. default:
  251. off = P_IRQ_OFF(port);
  252. break;
  253. }
  254. intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
  255. }
  256. static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
  257. int port)
  258. {
  259. unsigned int off;
  260. switch (port) {
  261. case 7:
  262. off = P7_IRQ_OFF;
  263. break;
  264. case 0:
  265. /* Port 0 interrupts are located on the first bank */
  266. intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
  267. intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
  268. return;
  269. default:
  270. off = P_IRQ_OFF(port);
  271. break;
  272. }
  273. intrl2_1_mask_set(priv, P_IRQ_MASK(off));
  274. intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
  275. }
/* Bring up a front-panel port: power up its memories, clear the Rx/Tx
 * disable bits, re-enable and re-initialize the internal GPHY if it
 * drives this port, unmask MoCA interrupts, restore the port's VLAN
 * control membership (including any cached bridge membership), and
 * restore EEE if it was previously enabled.  Always returns 0.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	/* vlan_ctl_mask caches bridge membership recorded in br_join() */
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
/* Quiesce a port: mask its interrupts (MoCA port only), power down the
 * internal GPHY if it drives this port, set the Rx/Tx disable bits, and
 * power down the port memories.  Ports armed for Wake-on-LAN are left
 * fully operational.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	/* Do not touch ports that must stay awake for Wake-on-LAN */
	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The IMP port is controlled via CORE_IMP_CTL, regular ports via
	 * their per-port G_PCTL register.
	 */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
  346. /* Returns 0 if EEE was not enabled, or 1 otherwise
  347. */
  348. static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
  349. struct phy_device *phy)
  350. {
  351. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  352. struct ethtool_eee *p = &priv->port_sts[port].eee;
  353. int ret;
  354. p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
  355. ret = phy_init_eee(phy, 0);
  356. if (ret)
  357. return 0;
  358. bcm_sf2_eee_enable_set(ds, port, true);
  359. return 1;
  360. }
  361. static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
  362. struct ethtool_eee *e)
  363. {
  364. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  365. struct ethtool_eee *p = &priv->port_sts[port].eee;
  366. u32 reg;
  367. reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
  368. e->eee_enabled = p->eee_enabled;
  369. e->eee_active = !!(reg & (1 << port));
  370. return 0;
  371. }
  372. static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
  373. struct phy_device *phydev,
  374. struct ethtool_eee *e)
  375. {
  376. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  377. struct ethtool_eee *p = &priv->port_sts[port].eee;
  378. p->eee_enabled = e->eee_enabled;
  379. if (!p->eee_enabled) {
  380. bcm_sf2_eee_enable_set(ds, port, false);
  381. } else {
  382. p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
  383. if (!p->eee_enabled)
  384. return -EOPNOTSUPP;
  385. }
  386. return 0;
  387. }
  388. static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
  389. {
  390. unsigned int timeout = 1000;
  391. u32 reg;
  392. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  393. reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
  394. core_writel(priv, reg, CORE_FAST_AGE_CTRL);
  395. do {
  396. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  397. if (!(reg & FAST_AGE_STR_DONE))
  398. break;
  399. cpu_relax();
  400. } while (timeout--);
  401. if (!timeout)
  402. return -ETIMEDOUT;
  403. core_writel(priv, 0, CORE_FAST_AGE_CTRL);
  404. return 0;
  405. }
  406. /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
  407. * flush for that port.
  408. */
  409. static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
  410. {
  411. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  412. core_writel(priv, port, CORE_FAST_AGE_PORT);
  413. return bcm_sf2_fast_age_op(priv);
  414. }
  415. static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
  416. {
  417. core_writel(priv, vid, CORE_FAST_AGE_VID);
  418. return bcm_sf2_fast_age_op(priv);
  419. }
  420. static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
  421. {
  422. unsigned int timeout = 10;
  423. u32 reg;
  424. do {
  425. reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
  426. if (!(reg & ARLA_VTBL_STDN))
  427. return 0;
  428. usleep_range(1000, 2000);
  429. } while (timeout--);
  430. return -ETIMEDOUT;
  431. }
  432. static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
  433. {
  434. core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
  435. return bcm_sf2_vlan_op_wait(priv);
  436. }
  437. static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
  438. struct bcm_sf2_vlan *vlan)
  439. {
  440. int ret;
  441. core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
  442. core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
  443. CORE_ARLA_VTBL_ENTRY);
  444. ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
  445. if (ret)
  446. pr_err("failed to write VLAN entry\n");
  447. }
  448. static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
  449. struct bcm_sf2_vlan *vlan)
  450. {
  451. u32 entry;
  452. int ret;
  453. core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
  454. ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
  455. if (ret)
  456. return ret;
  457. entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
  458. vlan->members = entry & FWD_MAP_MASK;
  459. vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
  460. return 0;
  461. }
/* Called when @port joins @bridge: take the port (and the CPU port, if
 * currently set) out of "join all VLANs" mode, then cross-enable VLAN
 * control membership between this port and every existing member of the
 * same bridge, caching the resulting masks so port_setup() can restore
 * them later.  Always returns 0.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg &= ~BIT(port);
	if ((reg & BIT(cpu_port)) == BIT(cpu_port))
		reg &= ~BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

	priv->port_sts[port].bridge_dev = bridge;
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Only consider ports already in the same bridge */
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
  498. static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
  499. {
  500. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  501. struct net_device *bridge = priv->port_sts[port].bridge_dev;
  502. s8 cpu_port = ds->dst->cpu_port;
  503. unsigned int i;
  504. u32 reg, p_ctl;
  505. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  506. for (i = 0; i < priv->hw_params.num_ports; i++) {
  507. /* Don't touch the remaining ports */
  508. if (priv->port_sts[i].bridge_dev != bridge)
  509. continue;
  510. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  511. reg &= ~(1 << port);
  512. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  513. priv->port_sts[port].vlan_ctl_mask = reg;
  514. /* Prevent self removal to preserve isolation */
  515. if (port != i)
  516. p_ctl &= ~(1 << i);
  517. }
  518. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  519. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  520. priv->port_sts[port].bridge_dev = NULL;
  521. /* Make this port join all VLANs without VLAN entries */
  522. reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
  523. reg |= BIT(port);
  524. if (!(reg & BIT(cpu_port)))
  525. reg |= BIT(cpu_port);
  526. core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
  527. }
/* Translate the bridge STP @state into the hardware MISTP state for
 * @port and program it, fast-ageing the port's ARL entries when the
 * port drops out of a learning/forwarding state.
 */
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
					u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	u32 reg;

	/* Snapshot the currently-programmed MISTP state */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			if (bcm_sf2_sw_fast_age_port(ds, port)) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return;
			}
		}
	}

	/* Re-read and update the state field only */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
  574. /* Address Resolution Logic routines */
  575. static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
  576. {
  577. unsigned int timeout = 10;
  578. u32 reg;
  579. do {
  580. reg = core_readl(priv, CORE_ARLA_RWCTL);
  581. if (!(reg & ARL_STRTDN))
  582. return 0;
  583. usleep_range(1000, 2000);
  584. } while (timeout--);
  585. return -ETIMEDOUT;
  586. }
  587. static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
  588. {
  589. u32 cmd;
  590. if (op > ARL_RW)
  591. return -EINVAL;
  592. cmd = core_readl(priv, CORE_ARLA_RWCTL);
  593. cmd &= ~IVL_SVL_SELECT;
  594. cmd |= ARL_STRTDN;
  595. if (op)
  596. cmd |= ARL_RW;
  597. else
  598. cmd &= ~ARL_RW;
  599. core_writel(priv, cmd, CORE_ARLA_RWCTL);
  600. return bcm_sf2_arl_op_wait(priv);
  601. }
/* Read back the 4 ARL bins returned for the MAC/VID previously latched
 * into the ARLA registers, leaving the matching entry in @ent.
 *
 * When @is_valid is true, *@idx receives the bin index of the first
 * valid entry found.  Returns 0 on a match, -ENOENT when no bin
 * satisfies the caller's expectation, or the wait error.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): a bitwise AND only tests for shared bits,
		 * not MAC equality — presumably a masked equality compare
		 * was intended; confirm against the ARL bin semantics.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
/* Read or update the ARL entry for @addr/@vid.
 *
 * @op: non-zero = read only (the lookup result is left in a local
 *      entry; only the return code is meaningful), zero = program a
 *      static entry for @port with validity @is_valid (false
 *      effectively deletes the binding), then read it back to verify.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Build the entry to program: always static, in bin @idx */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
  669. static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
  670. const struct switchdev_obj_port_fdb *fdb,
  671. struct switchdev_trans *trans)
  672. {
  673. /* We do not need to do anything specific here yet */
  674. return 0;
  675. }
  676. static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
  677. const struct switchdev_obj_port_fdb *fdb,
  678. struct switchdev_trans *trans)
  679. {
  680. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  681. if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
  682. pr_err("%s: failed to add MAC address\n", __func__);
  683. }
  684. static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
  685. const struct switchdev_obj_port_fdb *fdb)
  686. {
  687. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  688. return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
  689. }
  690. static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
  691. {
  692. unsigned timeout = 1000;
  693. u32 reg;
  694. do {
  695. reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
  696. if (!(reg & ARLA_SRCH_STDN))
  697. return 0;
  698. if (reg & ARLA_SRCH_VLID)
  699. return 0;
  700. usleep_range(1000, 2000);
  701. } while (timeout--);
  702. return -ETIMEDOUT;
  703. }
  704. static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
  705. struct bcm_sf2_arl_entry *ent)
  706. {
  707. u64 mac_vid;
  708. u32 fwd_entry;
  709. mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
  710. fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
  711. bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
  712. }
  713. static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
  714. const struct bcm_sf2_arl_entry *ent,
  715. struct switchdev_obj_port_fdb *fdb,
  716. int (*cb)(struct switchdev_obj *obj))
  717. {
  718. if (!ent->is_valid)
  719. return 0;
  720. if (port != ent->port)
  721. return 0;
  722. ether_addr_copy(fdb->addr, ent->mac);
  723. fdb->vid = ent->vid;
  724. fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
  725. return cb(&fdb->obj);
  726. }
/* Walk the hardware ARL via its search engine and invoke @cb for every
 * valid entry that belongs to @port.  Each completed search step yields
 * two result slots, which is why two entries are read per iteration;
 * the walk ends when both slots come back invalid or the whole table
 * has been scanned.
 */
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port].netdev;
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		/* Two invalid slots mean the search has been exhausted */
		if (!results[0].is_valid && !results[1].is_valid)
			break;
	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
/* Indirect pseudo-PHY register access through the switch core.
 *
 * @op: non-zero for a read, zero for a write.
 *
 * Temporarily selects the switch as MDIO master, programs the target
 * PHY address, performs the access, then restores the previous master
 * selection. Register offsets are built as (page << 8 | offset) << 2
 * to form byte addresses into the core register space.
 *
 * Returns the 16-bit read value for reads, 0 for writes.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	/* Route MDIO transactions through the switch core */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand MDIO mastership back */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
  780. static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
  781. {
  782. struct bcm_sf2_priv *priv = bus->priv;
  783. /* Intercept reads from Broadcom pseudo-PHY address, else, send
  784. * them to our master MDIO bus controller
  785. */
  786. if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
  787. return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
  788. else
  789. return mdiobus_read(priv->master_mii_bus, addr, regnum);
  790. }
  791. static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
  792. u16 val)
  793. {
  794. struct bcm_sf2_priv *priv = bus->priv;
  795. /* Intercept writes to the Broadcom pseudo-PHY address, else,
  796. * send them to our master MDIO bus controller
  797. */
  798. if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
  799. bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
  800. else
  801. mdiobus_write(priv->master_mii_bus, addr, regnum, val);
  802. return 0;
  803. }
/* Interrupt handler for the first (INTRL2_0) interrupt controller.
 *
 * Latches the active, unmasked status bits and acknowledges them.
 * No per-bit handling is needed on this line.
 */
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
/* Interrupt handler for the second (INTRL2_1) interrupt controller.
 *
 * Acknowledges the active bits and tracks link up/down events for
 * port 7 (the MoCA port), whose link state is only reported through
 * these interrupts and is later consumed by fixed_link_update.
 */
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}
  824. static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
  825. {
  826. unsigned int timeout = 1000;
  827. u32 reg;
  828. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  829. reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
  830. core_writel(priv, reg, CORE_WATCHDOG_CTRL);
  831. do {
  832. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  833. if (!(reg & SOFTWARE_RESET))
  834. break;
  835. usleep_range(1000, 2000);
  836. } while (timeout-- > 0);
  837. if (timeout == 0)
  838. return -ETIMEDOUT;
  839. return 0;
  840. }
/* Mask and acknowledge every interrupt source on both INTRL2
 * controllers, leaving nothing unmasked.
 */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
/* Walk the device-tree port children of @dn to discover which ports
 * carry internal PHYs and which one (if any) is the MoCA port.
 *
 * Populates priv->int_phy_mask and priv->moca_port (-1 when absent).
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			/* "internal" is not a standard phy-mode value, so
			 * parse the raw string ourselves
			 */
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;
	}
}
/* Create and register the switch's slave MII bus on top of the
 * integrated "brcm,unimac-mdio" master bus.
 *
 * Defers probing until the master bus exists. Takes a reference on the
 * master bus device and keeps the DT node in priv->master_mii_dn for
 * release at unregister time.
 *
 * Returns 0 on success, -EPROBE_DEFER / -ENOMEM / mdiobus registration
 * errors otherwise.
 */
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	/* 'index' is static so each probed switch gets a unique bus id */
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	/* phy_mask excludes the addresses we handle indirectly */
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	/* On failure drop the node reference here; on success it is
	 * released by bcm_sf2_mdio_unregister
	 */
	if (err)
		of_node_put(dn);

	return err;
}
/* Tear down the slave MII bus and drop the reference taken on the
 * master bus device-tree node during bcm_sf2_mdio_register.
 */
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}
/* Intentional no-op: this switch does not need a MAC address
 * programmed, but DSA requires the callback to exist.
 */
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}
/* Report the integrated GPHY revision to the PHY driver via the
 * phydev->dev_flags mechanism.
 */
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */
	return priv->hw_params.gphy_rev;
}
/* Reconfigure a port's RGMII/MII block and force the port-override
 * speed/duplex/link bits whenever the attached PHY's state changes.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - RGMII without delay shares the EXT_GPHY
		 * port mode, only id_mode_dis differs
		 */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
/* Refresh the fixed-PHY status for @port and mirror the resulting link
 * state into the port-override register.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Propagate the decided link state into the hardware override */
	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* RX pause bit alone means symmetric pause; with the TX pause bit
	 * set as well the port advertises asymmetric pause too
	 */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
  1063. static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
  1064. {
  1065. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1066. unsigned int port;
  1067. bcm_sf2_intr_disable(priv);
  1068. /* Disable all ports physically present including the IMP
  1069. * port, the other ones have already been disabled during
  1070. * bcm_sf2_sw_setup
  1071. */
  1072. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1073. if ((1 << port) & ds->enabled_port_mask ||
  1074. dsa_is_cpu_port(ds, port))
  1075. bcm_sf2_port_disable(ds, port, NULL);
  1076. }
  1077. return 0;
  1078. }
/* Resume hook: software-reset the switch core, re-enable the GPHY if
 * there is a single integrated one, and re-run per-port setup.
 *
 * Returns 0 on success or the reset error code.
 */
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	return 0;
}
  1099. static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
  1100. struct ethtool_wolinfo *wol)
  1101. {
  1102. struct net_device *p = ds->dst[ds->index].master_netdev;
  1103. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1104. struct ethtool_wolinfo pwol;
  1105. /* Get the parent device WoL settings */
  1106. p->ethtool_ops->get_wol(p, &pwol);
  1107. /* Advertise the parent device supported settings */
  1108. wol->supported = pwol.supported;
  1109. memset(&wol->sopass, 0, sizeof(wol->sopass));
  1110. if (pwol.wolopts & WAKE_MAGICSECURE)
  1111. memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
  1112. if (priv->wol_ports_mask & (1 << port))
  1113. wol->wolopts = pwol.wolopts;
  1114. else
  1115. wol->wolopts = 0;
  1116. }
/* Enable/disable Wake-on-LAN for @port, delegating the actual WoL
 * programming to the parent (master) network device.
 *
 * Returns -EINVAL for options the parent does not support, otherwise
 * the parent's set_wol result.
 */
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}
/* Turn global 802.1Q VLAN processing on or off.
 *
 * Read-modify-writes the VLAN control registers as one batch: when
 * enabling, IVL learning is selected, ingress VID checking drops
 * violations and VLAN-table misses are dropped; when disabling, those
 * checks are relaxed and violations are redirected to the IMP port.
 * CORE_VLAN_CTRL3 (per-port 802.1P remap) is always cleared.
 */
static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
{
	u32 mgmt, vc0, vc1, vc4, vc5;

	mgmt = core_readl(priv, CORE_SWMODE);
	vc0 = core_readl(priv, CORE_VLAN_CTRL0);
	vc1 = core_readl(priv, CORE_VLAN_CTRL1);
	vc4 = core_readl(priv, CORE_VLAN_CTRL4);
	vc5 = core_readl(priv, CORE_VLAN_CTRL5);

	mgmt &= ~SW_FWDG_MODE;

	if (enable) {
		vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
		vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc4 |= INGR_VID_CHK_DROP;
		vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
	} else {
		vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
		vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
		vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
	}

	core_writel(priv, vc0, CORE_VLAN_CTRL0);
	core_writel(priv, vc1, CORE_VLAN_CTRL1);
	core_writel(priv, 0, CORE_VLAN_CTRL3);
	core_writel(priv, vc4, CORE_VLAN_CTRL4);
	core_writel(priv, vc5, CORE_VLAN_CTRL5);
	core_writel(priv, mgmt, CORE_SWMODE);
}
  1170. static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
  1171. {
  1172. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1173. unsigned int port;
  1174. /* Clear all VLANs */
  1175. bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
  1176. for (port = 0; port < priv->hw_params.num_ports; port++) {
  1177. if (!((1 << port) & ds->enabled_port_mask))
  1178. continue;
  1179. core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
  1180. }
  1181. }
/* Intentional no-op: VLAN filtering is always accepted; the hardware
 * configuration happens in the vlan_prepare/add/del callbacks.
 */
static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	return 0;
}
/* switchdev prepare phase for VLAN add: globally enable VLAN
 * processing. Never fails, so the commit phase (vlan_add) can proceed
 * unconditionally.
 */
static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	bcm_sf2_enable_vlan(priv, true);

	return 0;
}
/* Commit phase for VLAN add: join @port (and always the CPU port) to
 * every VID in the requested range, updating untagged membership and
 * the port's default (PVID) tag as requested.
 */
static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan,
				struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	s8 cpu_port = ds->dst->cpu_port;
	struct bcm_sf2_vlan *vl;
	u16 vid;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		vl = &priv->vlans[vid];

		bcm_sf2_get_vlan_entry(priv, vid, vl);
		vl->members |= BIT(port) | BIT(cpu_port);
		if (untagged)
			vl->untag |= BIT(port) | BIT(cpu_port);
		else
			vl->untag &= ~(BIT(port) | BIT(cpu_port));

		bcm_sf2_set_vlan_entry(priv, vid, vl);
		bcm_sf2_sw_fast_age_vlan(priv, vid);
	}

	if (pvid) {
		core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
		core_writel(priv, vlan->vid_end,
			    CORE_DEFAULT_1Q_TAG_P(cpu_port));
		/* NOTE(review): 'vid' here is vid_end + 1 after the loop
		 * above - presumably intentional to age the next VID, but
		 * worth confirming against the hardware fast-age semantics.
		 */
		bcm_sf2_sw_fast_age_vlan(priv, vid);
	}
}
  1223. static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
  1224. const struct switchdev_obj_port_vlan *vlan)
  1225. {
  1226. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1227. bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  1228. s8 cpu_port = ds->dst->cpu_port;
  1229. struct bcm_sf2_vlan *vl;
  1230. u16 vid, pvid;
  1231. int ret;
  1232. pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
  1233. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  1234. vl = &priv->vlans[vid];
  1235. ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
  1236. if (ret)
  1237. return ret;
  1238. vl->members &= ~BIT(port);
  1239. if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
  1240. vl->members = 0;
  1241. if (pvid == vid)
  1242. pvid = 0;
  1243. if (untagged) {
  1244. vl->untag &= ~BIT(port);
  1245. if ((vl->untag & BIT(port)) == BIT(cpu_port))
  1246. vl->untag = 0;
  1247. }
  1248. bcm_sf2_set_vlan_entry(priv, vid, vl);
  1249. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1250. }
  1251. core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
  1252. core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
  1253. bcm_sf2_sw_fast_age_vlan(priv, vid);
  1254. return 0;
  1255. }
/* Dump all VLANs @port is a member of via the switchdev callback,
 * flagging untagged membership and the port's PVID.
 *
 * Returns 0 or the first non-zero value from @cb.
 */
static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
				struct switchdev_obj_port_vlan *vlan,
				int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct bcm_sf2_port_status *p = &priv->port_sts[port];
	struct bcm_sf2_vlan *vl;
	u16 vid, pvid;
	int err = 0;

	/* NOTE(review): the hardware PVID read below is never used - the
	 * PVID flag is taken from the cached p->pvid instead. Confirm the
	 * cache is kept in sync, or compare against 'pvid' here.
	 */
	pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));

	for (vid = 0; vid < VLAN_N_VID; vid++) {
		vl = &priv->vlans[vid];

		if (!(vl->members & BIT(port)))
			continue;

		vlan->vid_begin = vlan->vid_end = vid;
		vlan->flags = 0;

		if (vl->untag & BIT(port))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		if (p->pvid == vid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;

		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
  1282. static int bcm_sf2_sw_setup(struct dsa_switch *ds)
  1283. {
  1284. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1285. unsigned int port;
  1286. /* Enable all valid ports and disable those unused */
  1287. for (port = 0; port < priv->hw_params.num_ports; port++) {
  1288. /* IMP port receives special treatment */
  1289. if ((1 << port) & ds->enabled_port_mask)
  1290. bcm_sf2_port_setup(ds, port, NULL);
  1291. else if (dsa_is_cpu_port(ds, port))
  1292. bcm_sf2_imp_setup(ds, port);
  1293. else
  1294. bcm_sf2_port_disable(ds, port, NULL);
  1295. }
  1296. bcm_sf2_sw_configure_vlan(ds);
  1297. return 0;
  1298. }
/* DSA switch operations implemented by this driver */
static struct dsa_switch_ops bcm_sf2_switch_ops = {
	.setup			= bcm_sf2_sw_setup,
	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
	.set_addr		= bcm_sf2_sw_set_addr,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_eee		= bcm_sf2_sw_get_eee,
	.set_eee		= bcm_sf2_sw_set_eee,
	.port_bridge_join	= bcm_sf2_sw_br_join,
	.port_bridge_leave	= bcm_sf2_sw_br_leave,
	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
	.port_fdb_add		= bcm_sf2_sw_fdb_add,
	.port_fdb_del		= bcm_sf2_sw_fdb_del,
	.port_fdb_dump		= bcm_sf2_sw_fdb_dump,
	.port_vlan_filtering	= bcm_sf2_sw_vlan_filtering,
	.port_vlan_prepare	= bcm_sf2_sw_vlan_prepare,
	.port_vlan_add		= bcm_sf2_sw_vlan_add,
	.port_vlan_del		= bcm_sf2_sw_vlan_del,
	.port_vlan_dump		= bcm_sf2_sw_vlan_dump,
};
/* Platform probe: map register resources, reset the switch core,
 * register the slave MDIO bus, hook both interrupt lines, read the
 * hardware parameters and finally register with the DSA core.
 *
 * Returns 0 on success or a negative errno; the MDIO bus is the only
 * resource needing explicit unwinding (everything else is devm-managed).
 */
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	struct bcm_sf2_priv *priv;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	/* Allocate the switch and our private data in one block; priv
	 * lives immediately after the dsa_switch structure
	 */
	ds = devm_kzalloc(&pdev->dev, sizeof(*ds) + sizeof(*priv), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	priv = (struct bcm_sf2_priv *)(ds + 1);
	ds->priv = priv;
	ds->dev = &pdev->dev;
	ds->ops = &bcm_sf2_switch_ops;

	dev_set_drvdata(&pdev->dev, ds);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map all register resources in order; the pointers in priv
	 * starting at 'core' are laid out to match BCM_SF2_REGS_NAME
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = dsa_register_switch(ds, dn);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}
/* Platform remove: clear WoL state so suspend really powers ports
 * down, quiesce the switch, then unregister from DSA and tear down the
 * MDIO bus.
 */
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct dsa_switch *ds = platform_get_drvdata(pdev);
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(ds);
	dsa_unregister_switch(ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System sleep hooks: thin wrappers that delegate to the DSA core,
 * which walks the switch tree and invokes our .suspend/.resume ops.
 */
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dsa_switch *ds = platform_get_drvdata(pdev);

	return dsa_switch_suspend(ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dsa_switch *ds = platform_get_drvdata(pdev);

	return dsa_switch_resume(ds);
}
#endif /* CONFIG_PM_SLEEP */
/* Expands to empty pm_ops when CONFIG_PM_SLEEP is disabled */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);
  1447. static const struct of_device_id bcm_sf2_of_match[] = {
  1448. { .compatible = "brcm,bcm7445-switch-v4.0" },
  1449. { /* sentinel */ },
  1450. };
/* Platform driver glue; PM ops cover system suspend/resume */
static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
/* Standard module registration and metadata */
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");