bcmsysport.c
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_csum_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_csum_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	info->n_stats = BCM_SYSPORT_STATS_LEN;
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}
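
/* Refresh the software copy of the hardware statistics. UniMAC MIB counters
 * are read sequentially starting at UMAC_MIB_START; RXCHK/RBUF counters that
 * read back as all ones (presumably saturated) are reset to zero in hardware.
 */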
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
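
/* Allocate and DMA-map a fresh receive buffer for this control block and
 * program its address into the current RX descriptor, then advance the
 * assignment pointer (the ring size is expected to be a power of two).
 */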
static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DESC_SIZE);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) &&
	       (processed < budget)) {

		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			DESC_STATUS_MASK;

		processed++;
		priv->rx_read_ptr++;
		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto refill;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
refill:
		bcm_sysport_rx_refill(priv, cb);
	}

	return processed;
}
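
/* Unmap and free a single transmitted SKB, or unmap an SKB fragment, and
 * update the packet/byte completion counters accordingly.
 */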
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
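
/* NAPI poll handler for one TX ring: reclaim completed descriptors and
 * re-enable this ring's interrupt once no more work is pending.
 */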
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	}

	return 0;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
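
/* NAPI poll handler for the RX path: process up to budget descriptors,
 * publish the new consumer index to RDMA and re-enable the RX interrupt
 * when the budget was not exhausted.
 */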
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
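
/* Prepend a zeroed Transmit Status Block (TSB) to the SKB and, for
 * CHECKSUM_PARTIAL packets, fill in the L4 checksum pointer/offset so the
 * hardware can insert the TCP/UDP checksum.
 */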
static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return -ENOMEM;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return 0;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else
			csum_info = 0;

		tsb->l4_ptr_dest_map = csum_info;
	}

	return 0;
}
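
/* Main transmit path: pad the packet to the minimum length expected by the
 * downstream switch, map it for DMA, build a single descriptor and hand its
 * address to the TDMA write port for this ring.
 */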
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		ret = bcm_sysport_insert_tsb(skb, dev);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}

	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (changed) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);

		phy_print_status(priv->phydev);
	}
}
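
/* Allocate and program one TX ring: software control blocks, the single
 * coherent descriptor used to feed the TDMA write port, the per-ring NAPI
 * context and the corresponding TDMA ring registers.
 */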
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
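
/* Initialize the RX ring: the descriptors live in RDMA I/O space, so only
 * the control blocks and receive buffers are allocated here before the
 * RDMA registers are programmed for ring mode operation.
 */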
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kzalloc(priv->num_rx_bds *
				sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= CMD_RX_EN | CMD_TX_EN;
	else
		reg &= ~(CMD_RX_EN | CMD_TX_EN);
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
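
/* Flush the RX and TX paths at the TOPCTRL level: assert both flush
 * controls, wait 1 ms (presumably for the FIFOs to drain), then release.
 */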
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
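
/* ndo_open: reset and flush the hardware, connect the PHY, request the RX
 * and TX interrupts, set up the TX/RX rings and DMA engines, then enable
 * the UniMAC, NAPI and the transmit queues.
 */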
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
					0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, 1);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}
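
/* ndo_stop: quiesce the software paths and mask interrupts first, stop the
 * DMA engines and UniMAC in an order that lets in-flight packets drain,
 * then tear down the rings, IRQs and PHY connection.
 */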
static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	/* Disable UniMAC RX */
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_RX_EN;
	umac_writel(priv, reg, UMAC_CMD);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_TX_EN;
	umac_writel(priv, reg, UMAC_CMD);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
};

#define REV_FMT	"v%2x.%02x"
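
/* Platform probe: map the register resource, fetch the IRQs, queue counts
 * and MAC address from the device tree node, then register the net_device
 * and report the hardware revision.
 */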
static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.owner = THIS_MODULE,
		.of_match_table = bcm_sysport_of_match,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");