bcmsysport.c

/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
{ \
	u32 reg = readl_relaxed(priv->base + offset + off); \
	return reg; \
} \
static inline void name##_writel(struct bcm_sysport_priv *priv, \
				 u32 val, u32 off) \
{ \
	writel_relaxed(val, priv->base + offset + off); \
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
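/* Each BCM_SYSPORT_IO_MACRO() invocation above generates a readl/writel
 * accessor pair scoped to one register block, so callers can use, e.g.:
 *
 *	u32 reg = umac_readl(priv, UMAC_CMD);
 *	umac_writel(priv, reg, UMAC_CMD);
 *
 * (UMAC_CMD used here purely as an illustrative offset within the UMAC
 * block.)
 */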
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
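/* SYSTEMPORT Lite also inserted a bit in TDMA_CONTROL: bits at or above
 * the original ACB_ALGO position are shifted up by one, which the helper
 * below hides so callers can keep using the non-Lite bit definitions.
 */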
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}
/* L2-interrupt masking/unmasking helpers; they automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which) \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
					       u32 mask) \
{ \
	priv->irq##which##_mask &= ~(mask); \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
					     u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
	priv->irq##which##_mask |= (mask); \
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
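/* The two expansions above provide intrl2_0_mask_set()/intrl2_0_mask_clear()
 * and their intrl2_1_* counterparts, keeping priv->irq0_mask/priv->irq1_mask
 * in sync with the hardware mask registers.
 */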
/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}
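/* Feature-change handler: only touch the RX and/or TX checksum offload
 * configuration for the feature bits that actually changed.
 */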
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		/* Values must land at the compacted index j, not i, since the
		 * strings above are emitted with Lite holes skipped.
		 */
		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[j] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[j] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}
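/* The coalescing timeouts below are programmed in hardware ticks of roughly
 * 8.192 us (the 125 MHz reference clock divided by 1024, see the comment in
 * bcm_sysport_set_coalesce()), hence the DIV_ROUND_UP(usecs * 1000, 8192)
 * conversions from microseconds to ticks.
 */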
static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
		 RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125 MHz; the DMA timeout tick is this
	 * reference clock divided by 1024, which yields roughly 8.192 us.
	 * Our maximum value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
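/* Refill strategy: allocate and map the replacement SKB *before* unmapping
 * and returning the one currently on the ring, so a control block never
 * ends up holding a DMA mapping without an SKB (or vice versa).
 */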
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}
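/* Release a single reclaimed TX control block: a set cb->skb denotes the
 * head of a packet mapped with dma_map_single(), while a bare non-zero
 * dma_addr denotes an SKB fragment mapped with dma_map_page().
 */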
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
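/* NAPI poll callback for the RX path: consume up to @budget packets,
 * publish the new consumer index to hardware, and re-enable the RDMA
 * interrupt once fewer than @budget packets were processed.
 */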
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct net_dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
			       priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bcm_sysport_net_dim *ndim =
		container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
		container_of(ndim, struct bcm_sysport_priv, dim);
	struct net_dim_cq_moder cur_profile =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = NET_DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
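/* Wake-on-LAN interrupt: all that is needed here is to report a wakeup
 * event against our device so the PM core accounts for it.
 */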
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Both ISRs cast dev_id to a struct net_device, so pass "dev" here */
	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, dev);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, dev);
		enable_irq(priv->irq1);
	}
}
#endif
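/* Hardware TX checksumming consumes a Transmit Status Block (TSB)
 * prepended to the packet: grow headroom if required, push a zeroed TSB,
 * and for CHECKSUM_PARTIAL SKBs fill in the L4 checksum start/offset
 * information the hardware expects.
 */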
  1004. static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
  1005. struct net_device *dev)
  1006. {
  1007. struct sk_buff *nskb;
  1008. struct bcm_tsb *tsb;
  1009. u32 csum_info;
  1010. u8 ip_proto;
  1011. u16 csum_start;
  1012. __be16 ip_ver;
  1013. /* Re-allocate SKB if needed */
  1014. if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
  1015. nskb = skb_realloc_headroom(skb, sizeof(*tsb));
  1016. dev_kfree_skb(skb);
  1017. if (!nskb) {
  1018. dev->stats.tx_errors++;
  1019. dev->stats.tx_dropped++;
  1020. return NULL;
  1021. }
  1022. skb = nskb;
  1023. }
  1024. tsb = skb_push(skb, sizeof(*tsb));
  1025. /* Zero-out TSB by default */
  1026. memset(tsb, 0, sizeof(*tsb));
  1027. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  1028. ip_ver = skb->protocol;
  1029. switch (ip_ver) {
  1030. case htons(ETH_P_IP):
  1031. ip_proto = ip_hdr(skb)->protocol;
  1032. break;
  1033. case htons(ETH_P_IPV6):
  1034. ip_proto = ipv6_hdr(skb)->nexthdr;
  1035. break;
  1036. default:
  1037. return skb;
  1038. }
  1039. /* Get the checksum offset and the L4 (transport) offset */
  1040. csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
  1041. csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
  1042. csum_info |= (csum_start << L4_PTR_SHIFT);
  1043. if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
  1044. csum_info |= L4_LENGTH_VALID;
  1045. if (ip_proto == IPPROTO_UDP &&
  1046. ip_ver == htons(ETH_P_IP))
  1047. csum_info |= L4_UDP;
  1048. } else {
  1049. csum_info = 0;
  1050. }
  1051. tsb->l4_ptr_dest_map = csum_info;
  1052. }
  1053. return skb;
  1054. }
  1055. static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
  1056. struct net_device *dev)
  1057. {
  1058. struct bcm_sysport_priv *priv = netdev_priv(dev);
  1059. struct device *kdev = &priv->pdev->dev;
  1060. struct bcm_sysport_tx_ring *ring;
  1061. struct bcm_sysport_cb *cb;
  1062. struct netdev_queue *txq;
  1063. struct dma_desc *desc;
  1064. unsigned int skb_len;
  1065. unsigned long flags;
  1066. dma_addr_t mapping;
  1067. u32 len_status;
  1068. u16 queue;
  1069. int ret;
  1070. queue = skb_get_queue_mapping(skb);
  1071. txq = netdev_get_tx_queue(dev, queue);
  1072. ring = &priv->tx_rings[queue];
  1073. /* lock against tx reclaim in BH context and TX ring full interrupt */
  1074. spin_lock_irqsave(&ring->lock, flags);
  1075. if (unlikely(ring->desc_count == 0)) {
  1076. netif_tx_stop_queue(txq);
  1077. netdev_err(dev, "queue %d awake and ring full!\n", queue);
  1078. ret = NETDEV_TX_BUSY;
  1079. goto out;
  1080. }
  1081. /* Insert TSB and checksum infos */
  1082. if (priv->tsb_en) {
  1083. skb = bcm_sysport_insert_tsb(skb, dev);
  1084. if (!skb) {
  1085. ret = NETDEV_TX_OK;
  1086. goto out;
  1087. }
  1088. }
  1089. skb_len = skb->len;
  1090. mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
  1091. if (dma_mapping_error(kdev, mapping)) {
  1092. priv->mib.tx_dma_failed++;
  1093. netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
  1094. skb->data, skb_len);
  1095. ret = NETDEV_TX_OK;
  1096. goto out;
  1097. }
  1098. /* Remember the SKB for future freeing */
  1099. cb = &ring->cbs[ring->curr_desc];
  1100. cb->skb = skb;
  1101. dma_unmap_addr_set(cb, dma_addr, mapping);
  1102. dma_unmap_len_set(cb, dma_len, skb_len);
  1103. /* Fetch a descriptor entry from our pool */
  1104. desc = ring->desc_cpu;
  1105. desc->addr_lo = lower_32_bits(mapping);
  1106. len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
  1107. len_status |= (skb_len << DESC_LEN_SHIFT);
  1108. len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
  1109. DESC_STATUS_SHIFT;
  1110. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1111. len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
  1112. ring->curr_desc++;
  1113. if (ring->curr_desc == ring->size)
  1114. ring->curr_desc = 0;
  1115. ring->desc_count--;
  1116. /* Ensure write completion of the descriptor status/length
  1117. * in DRAM before the System Port WRITE_PORT register latches
  1118. * the value
  1119. */
  1120. wmb();
  1121. desc->addr_status_len = len_status;
  1122. wmb();
  1123. /* Write this descriptor address to the RING write port */
  1124. tdma_port_write_desc_addr(priv, desc, ring->index);
  1125. /* Check ring space and update SW control flow */
  1126. if (ring->desc_count == 0)
  1127. netif_tx_stop_queue(txq);
  1128. netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
  1129. ring->index, ring->desc_count, ring->curr_desc);
  1130. ret = NETDEV_TX_OK;
  1131. out:
  1132. spin_unlock_irqrestore(&ring->lock, flags);
  1133. return ret;
  1134. }
  1135. static void bcm_sysport_tx_timeout(struct net_device *dev)
  1136. {
  1137. netdev_warn(dev, "transmit timeout!\n");
  1138. netif_trans_update(dev);
  1139. dev->stats.tx_errors++;
  1140. netif_tx_wake_all_queues(dev);
  1141. }
  1142. /* phylib adjust link callback */
  1143. static void bcm_sysport_adj_link(struct net_device *dev)
  1144. {
  1145. struct bcm_sysport_priv *priv = netdev_priv(dev);
  1146. struct phy_device *phydev = dev->phydev;
  1147. unsigned int changed = 0;
  1148. u32 cmd_bits = 0, reg;
  1149. if (priv->old_link != phydev->link) {
  1150. changed = 1;
  1151. priv->old_link = phydev->link;
  1152. }
  1153. if (priv->old_duplex != phydev->duplex) {
  1154. changed = 1;
  1155. priv->old_duplex = phydev->duplex;
  1156. }
  1157. if (priv->is_lite)
  1158. goto out;
  1159. switch (phydev->speed) {
  1160. case SPEED_2500:
  1161. cmd_bits = CMD_SPEED_2500;
  1162. break;
  1163. case SPEED_1000:
  1164. cmd_bits = CMD_SPEED_1000;
  1165. break;
  1166. case SPEED_100:
  1167. cmd_bits = CMD_SPEED_100;
  1168. break;
  1169. case SPEED_10:
  1170. cmd_bits = CMD_SPEED_10;
  1171. break;
  1172. default:
  1173. break;
  1174. }
  1175. cmd_bits <<= CMD_SPEED_SHIFT;
  1176. if (phydev->duplex == DUPLEX_HALF)
  1177. cmd_bits |= CMD_HD_EN;
  1178. if (priv->old_pause != phydev->pause) {
  1179. changed = 1;
  1180. priv->old_pause = phydev->pause;
  1181. }
  1182. if (!phydev->pause)
  1183. cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
  1184. if (!changed)
  1185. return;
  1186. if (phydev->link) {
  1187. reg = umac_readl(priv, UMAC_CMD);
  1188. reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
  1189. CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
  1190. CMD_TX_PAUSE_IGNORE);
  1191. reg |= cmd_bits;
  1192. umac_writel(priv, reg, UMAC_CMD);
  1193. }
  1194. out:
  1195. if (changed)
  1196. phy_print_status(phydev);
  1197. }
  1198. static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
  1199. void (*cb)(struct work_struct *work))
  1200. {
  1201. struct bcm_sysport_net_dim *dim = &priv->dim;
  1202. INIT_WORK(&dim->dim.work, cb);
  1203. dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
  1204. dim->event_ctr = 0;
  1205. dim->packets = 0;
  1206. dim->bytes = 0;
  1207. }
  1208. static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
  1209. {
  1210. struct bcm_sysport_net_dim *dim = &priv->dim;
  1211. struct net_dim_cq_moder moder;
  1212. u32 usecs, pkts;
  1213. usecs = priv->rx_coalesce_usecs;
  1214. pkts = priv->rx_max_coalesced_frames;
  1215. /* If DIM was enabled, re-apply default parameters */
  1216. if (dim->use_dim) {
  1217. moder = net_dim_get_def_rx_moderation(dim->dim.mode);
  1218. usecs = moder.usec;
  1219. pkts = moder.pkts;
  1220. }
  1221. bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
  1222. }
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

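/*
 * Illustrative example (hypothetical values): with ring->inspect set,
 * switch_queue = 2 and switch_port = 5, the mapping register above ends
 * up programmed as
 *
 *	reg = (2 & RING_QID_MASK) | (5 << RING_PORT_ID_SHIFT);
 *
 * which lets TX completions on this ring be attributed back to switch
 * port 5, queue 2. Rings that are not inspected set RING_IGNORE_STATUS
 * instead.
 */
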
static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

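/*
 * Both enable helpers above follow the same write-then-poll shape; a
 * condensed sketch (pseudo-code, not a driver API):
 *
 *	set or clear the EN bit in the CONTROL register;
 *	do {
 *		if (!!(STATUS & DISABLED) == !enable)
 *			return 0;
 *		usleep_range(1000, 2000);
 *	} while (timeout-- > 0);
 *	return -ETIMEDOUT;
 *
 * The DISABLED status bit is compared against !enable: enabling waits
 * for it to clear, disabling waits for it to be set.
 */
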
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

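/*
 * Sizing example (constants taken from bcmsysport.h at the time of
 * writing, shown for illustration only): a full SYSTEMPORT has
 * num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS and WORDS_PER_DESC = 2,
 * so with 1024 descriptor words the RX ring holds 512 buffers;
 * SYSTEMPORT Lite uses SP_LT_NUM_HW_RX_DESC_WORDS and therefore a
 * proportionally smaller ring.
 */
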
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

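/*
 * Worked example: for the (made-up) address 00:10:18:aa:bb:cc the two
 * registers are programmed as
 *
 *	mac0 = 0x001018aa;	(addr[0..3], most significant byte first)
 *	mac1 = 0x0000bbcc;	(addr[4..5] in the low 16 bits)
 */
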
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

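/*
 * The fetch_begin/fetch_retry pair above is the standard u64_stats
 * seqcount read loop: the snapshot of rx_packets/rx_bytes is retried
 * whenever the NAPI poll path updated the counters concurrently, so
 * 64-bit statistics stay consistent even on 32-bit hosts where the two
 * halves cannot be read atomically.
 */
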
static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 4-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

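/*
 * Note on the error labels above: they unwind in strict reverse order
 * of setup and fall through into one another. A TDMA enable failure
 * masks the RX interrupts again, tears down the RX ring, then all TX
 * rings, then frees the IRQs and finally disconnects the PHY. Keeping
 * the labels in this shape is what lets each goto release exactly the
 * resources acquired so far and nothing more.
 */
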
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    void *accel_priv,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return fallback(dev, skb);

	return tx_ring->index;
}

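/*
 * Illustrative decode (hypothetical numbers): the Broadcom tag layer
 * encodes the switch port and queue into skb->queue_mapping, and the
 * BRCM_TAG_GET_QUEUE()/BRCM_TAG_GET_PORT() macros extract them again.
 * With q = 2, port = 1 and per_port_num_tx_queues = 4, the lookup above
 * becomes
 *
 *	tx_ring = priv->ring_map[2 + 1 * 4];
 *
 * i.e. ring_map is a flat [port][queue] table with a per-port stride.
 */
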
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, start, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for switches that are not
	 * directly attached
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, only a 2:1 mapping. By reducing the number of per-port
	 * (slave_dev) network device queues, we achieve just that. This needs
	 * to happen now, before any slave network device is used, so that it
	 * accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
	for (q = 0; q < num_tx_queues; q++) {
		ring = &priv->tx_rings[q + start];

		/* Just remember the mapping; the actual programming is done
		 * during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = q;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[q + port * num_tx_queues] = ring;

		/* Set all queues as being used now */
		set_bit(q + start, &priv->queue_bitmap);
	}

	return 0;
}

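/*
 * Allocation example (hypothetical topology): with two directly
 * attached switch ports exposing 4 queues each, the first notifier
 * call finds start = 0 and claims TX rings 0-3, the second finds
 * start = 4 and claims rings 4-7. queue_bitmap is what makes
 * find_first_zero_bit() skip rings already handed out to a port.
 */
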
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	info = ptr;

	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
}

#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

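/*
 * REV_FMT decode example (made-up value): a REV_CNTL read of 0x0102
 * (after masking with REV_MASK) is printed as "v 1.02" -- the high byte
 * is the major revision, the low byte the minor one, and "%2x" pads the
 * major digit with a space.
 */
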
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

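/*
 * Background note: MPD_EN arms the UniMAC magic-packet detector. A
 * standard Wake-on-LAN magic packet is 6 bytes of 0xff followed by the
 * station's MAC address repeated 16 times; with WAKE_MAGICSECURE the
 * frame must additionally carry the 6-byte password programmed earlier
 * (PSW_EN). The receiver must keep running for detection to work, which
 * is why CMD_RX_EN is turned back on before suspending.
 */
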
static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver =  {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");