fman_dtsec.c

  1. /*
  2. * Copyright 2008-2015 Freescale Semiconductor Inc.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions are met:
  6. * * Redistributions of source code must retain the above copyright
  7. * notice, this list of conditions and the following disclaimer.
  8. * * Redistributions in binary form must reproduce the above copyright
  9. * notice, this list of conditions and the following disclaimer in the
  10. * documentation and/or other materials provided with the distribution.
  11. * * Neither the name of Freescale Semiconductor nor the
  12. * names of its contributors may be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. *
  16. * ALTERNATIVELY, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") as published by the Free Software
  18. * Foundation, either version 2 of that License or (at your option) any
  19. * later version.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  22. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  23. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  24. * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  25. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  26. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  29. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  30. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33. #include "fman_dtsec.h"
  34. #include "fman.h"
  35. #include <linux/slab.h>
  36. #include <linux/bitrev.h>
  37. #include <linux/io.h>
  38. #include <linux/delay.h>
  39. #include <linux/phy.h>
  40. #include <linux/crc32.h>
  41. #include <linux/of_mdio.h>
  42. #include <linux/mii.h>
  43. /* TBI register addresses */
  44. #define MII_TBICON 0x11
  45. /* TBICON register bit fields */
  46. #define TBICON_SOFT_RESET 0x8000 /* Soft reset */
  47. #define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
  48. #define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
  49. #define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
  50. #define TBICON_CLK_SELECT 0x0020 /* Clock select */
  51. #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
  52. #define TBIANA_SGMII 0x4001
  53. #define TBIANA_1000X 0x01a0
  54. /* Interrupt Mask Register (IMASK) */
  55. #define DTSEC_IMASK_BREN 0x80000000
  56. #define DTSEC_IMASK_RXCEN 0x40000000
  57. #define DTSEC_IMASK_MSROEN 0x04000000
  58. #define DTSEC_IMASK_GTSCEN 0x02000000
  59. #define DTSEC_IMASK_BTEN 0x01000000
  60. #define DTSEC_IMASK_TXCEN 0x00800000
  61. #define DTSEC_IMASK_TXEEN 0x00400000
  62. #define DTSEC_IMASK_LCEN 0x00040000
  63. #define DTSEC_IMASK_CRLEN 0x00020000
  64. #define DTSEC_IMASK_XFUNEN 0x00010000
  65. #define DTSEC_IMASK_ABRTEN 0x00008000
  66. #define DTSEC_IMASK_IFERREN 0x00004000
  67. #define DTSEC_IMASK_MAGEN 0x00000800
  68. #define DTSEC_IMASK_MMRDEN 0x00000400
  69. #define DTSEC_IMASK_MMWREN 0x00000200
  70. #define DTSEC_IMASK_GRSCEN 0x00000100
  71. #define DTSEC_IMASK_TDPEEN 0x00000002
  72. #define DTSEC_IMASK_RDPEEN 0x00000001
  73. #define DTSEC_EVENTS_MASK \
  74. ((u32)(DTSEC_IMASK_BREN | \
  75. DTSEC_IMASK_RXCEN | \
  76. DTSEC_IMASK_BTEN | \
  77. DTSEC_IMASK_TXCEN | \
  78. DTSEC_IMASK_TXEEN | \
  79. DTSEC_IMASK_ABRTEN | \
  80. DTSEC_IMASK_LCEN | \
  81. DTSEC_IMASK_CRLEN | \
  82. DTSEC_IMASK_XFUNEN | \
  83. DTSEC_IMASK_IFERREN | \
  84. DTSEC_IMASK_MAGEN | \
  85. DTSEC_IMASK_TDPEEN | \
  86. DTSEC_IMASK_RDPEEN))
  87. /* dtsec timestamp event bits */
  88. #define TMR_PEMASK_TSREEN 0x00010000
  89. #define TMR_PEVENT_TSRE 0x00010000
  90. /* Group address bit indication */
  91. #define MAC_GROUP_ADDRESS 0x0000010000000000ULL
  92. /* Defaults */
  93. #define DEFAULT_HALFDUP_RETRANSMIT 0xf
  94. #define DEFAULT_HALFDUP_COLL_WINDOW 0x37
  95. #define DEFAULT_TX_PAUSE_TIME 0xf000
  96. #define DEFAULT_RX_PREPEND 0
  97. #define DEFAULT_PREAMBLE_LEN 7
  98. #define DEFAULT_TX_PAUSE_TIME_EXTD 0
  99. #define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
  100. #define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
  101. #define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
  102. #define DEFAULT_BACK_TO_BACK_IPG 0x60
  103. #define DEFAULT_MAXIMUM_FRAME 0x600
  104. /* register related defines (bits, field offsets..) */
  105. #define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
  106. #define DTSEC_ECNTRL_GMIIM 0x00000040
  107. #define DTSEC_ECNTRL_TBIM 0x00000020
  108. #define DTSEC_ECNTRL_SGMIIM 0x00000002
  109. #define DTSEC_ECNTRL_RPM 0x00000010
  110. #define DTSEC_ECNTRL_R100M 0x00000008
  111. #define DTSEC_ECNTRL_QSGMIIM 0x00000001
  112. #define TCTRL_GTS 0x00000020
  113. #define RCTRL_PAL_MASK 0x001f0000
  114. #define RCTRL_PAL_SHIFT 16
  115. #define RCTRL_GHTX 0x00000400
  116. #define RCTRL_GRS 0x00000020
  117. #define RCTRL_MPROM 0x00000008
  118. #define RCTRL_RSF 0x00000004
  119. #define RCTRL_UPROM 0x00000001
  120. #define MACCFG1_SOFT_RESET 0x80000000
  121. #define MACCFG1_RX_FLOW 0x00000020
  122. #define MACCFG1_TX_FLOW 0x00000010
  123. #define MACCFG1_TX_EN 0x00000001
  124. #define MACCFG1_RX_EN 0x00000004
  125. #define MACCFG2_NIBBLE_MODE 0x00000100
  126. #define MACCFG2_BYTE_MODE 0x00000200
  127. #define MACCFG2_PAD_CRC_EN 0x00000004
  128. #define MACCFG2_FULL_DUPLEX 0x00000001
  129. #define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
  130. #define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
  131. #define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
  132. #define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
  133. #define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
  134. #define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
  135. #define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
  136. #define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
  137. #define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
  138. #define HAFDUP_EXCESS_DEFER 0x00010000
  139. #define HAFDUP_COLLISION_WINDOW 0x000003ff
  140. #define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
  141. #define HAFDUP_RETRANSMISSION_MAX 0x0000f000
  142. #define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
  143. #define PTV_PTE_MASK 0xffff0000
  144. #define PTV_PT_MASK 0x0000ffff
  145. #define PTV_PTE_SHIFT 16
  146. #define MAX_PACKET_ALIGNMENT 31
  147. #define MAX_INTER_PACKET_GAP 0x7f
  148. #define MAX_RETRANSMISSION 0x0f
  149. #define MAX_COLLISION_WINDOW 0x03ff
  150. /* Hash table size (32 bits*8 regs) */
  151. #define DTSEC_HASH_TABLE_SIZE 256
  152. /* Extended Hash table size (32 bits*16 regs) */
  153. #define EXTENDED_HASH_TABLE_SIZE 512
  154. /* dTSEC Memory Map registers */
  155. struct dtsec_regs {
  156. /* dTSEC General Control and Status Registers */
  157. u32 tsec_id; /* 0x000 ETSEC_ID register */
  158. u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
  159. u32 ievent; /* 0x008 Interrupt event register */
  160. u32 imask; /* 0x00C Interrupt mask register */
  161. u32 reserved0010[1];
  162. u32 ecntrl; /* 0x014 E control register */
  163. u32 ptv; /* 0x018 Pause time value register */
  164. u32 tbipa; /* 0x01C TBI PHY address register */
  165. u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
  166. u32 tmr_pevent; /* 0x024 Time-stamp event register */
  167. u32 tmr_pemask; /* 0x028 Timer event mask register */
  168. u32 reserved002c[5];
  169. u32 tctrl; /* 0x040 Transmit control register */
  170. u32 reserved0044[3];
  171. u32 rctrl; /* 0x050 Receive control register */
  172. u32 reserved0054[11];
  173. u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
  174. u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
  175. u32 reserved00c0[16];
  176. u32 maccfg1; /* 0x100 MAC configuration #1 */
  177. u32 maccfg2; /* 0x104 MAC configuration #2 */
  178. u32 ipgifg; /* 0x108 IPG/IFG */
  179. u32 hafdup; /* 0x10C Half-duplex */
  180. u32 maxfrm; /* 0x110 Maximum frame */
  181. u32 reserved0114[10];
  182. u32 ifstat; /* 0x13C Interface status */
  183. u32 macstnaddr1; /* 0x140 Station Address,part 1 */
  184. u32 macstnaddr2; /* 0x144 Station Address,part 2 */
  185. struct {
  186. u32 exact_match1; /* octets 1-4 */
  187. u32 exact_match2; /* octets 5-6 */
  188. } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
  189. u32 reserved01c0[16];
  190. u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
  191. u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
  192. u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
  193. u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
  194. u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
  195. u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
  196. u32 trmgv;
  197. /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
  198. u32 rbyt; /* 0x21C receive byte counter */
  199. u32 rpkt; /* 0x220 receive packet counter */
  200. u32 rfcs; /* 0x224 receive FCS error counter */
  201. u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
  202. u32 rbca; /* 0x22C Rx broadcast packet counter */
  203. u32 rxcf; /* 0x230 Rx control frame packet counter */
  204. u32 rxpf; /* 0x234 Rx pause frame packet counter */
  205. u32 rxuo; /* 0x238 Rx unknown OP code counter */
  206. u32 raln; /* 0x23C Rx alignment error counter */
  207. u32 rflr; /* 0x240 Rx frame length error counter */
  208. u32 rcde; /* 0x244 Rx code error counter */
  209. u32 rcse; /* 0x248 Rx carrier sense error counter */
  210. u32 rund; /* 0x24C Rx undersize packet counter */
  211. u32 rovr; /* 0x250 Rx oversize packet counter */
  212. u32 rfrg; /* 0x254 Rx fragments counter */
  213. u32 rjbr; /* 0x258 Rx jabber counter */
  214. u32 rdrp; /* 0x25C Rx drop */
  215. u32 tbyt; /* 0x260 Tx byte counter */
  216. u32 tpkt; /* 0x264 Tx packet counter */
  217. u32 tmca; /* 0x268 Tx multicast packet counter */
  218. u32 tbca; /* 0x26C Tx broadcast packet counter */
  219. u32 txpf; /* 0x270 Tx pause control frame counter */
  220. u32 tdfr; /* 0x274 Tx deferral packet counter */
  221. u32 tedf; /* 0x278 Tx excessive deferral packet counter */
  222. u32 tscl; /* 0x27C Tx single collision packet counter */
  223. u32 tmcl; /* 0x280 Tx multiple collision packet counter */
  224. u32 tlcl; /* 0x284 Tx late collision packet counter */
  225. u32 txcl; /* 0x288 Tx excessive collision packet counter */
  226. u32 tncl; /* 0x28C Tx total collision counter */
  227. u32 reserved0290[1];
  228. u32 tdrp; /* 0x294 Tx drop frame counter */
  229. u32 tjbr; /* 0x298 Tx jabber frame counter */
  230. u32 tfcs; /* 0x29C Tx FCS error counter */
  231. u32 txcf; /* 0x2A0 Tx control frame counter */
  232. u32 tovr; /* 0x2A4 Tx oversize frame counter */
  233. u32 tund; /* 0x2A8 Tx undersize frame counter */
  234. u32 tfrg; /* 0x2AC Tx fragments frame counter */
  235. u32 car1; /* 0x2B0 carry register one register* */
  236. u32 car2; /* 0x2B4 carry register two register* */
  237. u32 cam1; /* 0x2B8 carry register one mask register */
  238. u32 cam2; /* 0x2BC carry register two mask register */
  239. u32 reserved02c0[848];
  240. };
  241. /* struct dtsec_cfg - dTSEC configuration
  242. * Transmit half-duplex flow control, under software control for 10/100-Mbps
  243. * half-duplex media. If set, back pressure is applied to media by raising
  244. * carrier.
  245. * halfdup_retransmit:
  246. * Number of retransmission attempts following a collision.
  247. * If this is exceeded dTSEC aborts transmission due to excessive collisions.
  248. * The standard specifies the attempt limit to be 15.
  249. * halfdup_coll_window:
  250. * The number of bytes of the frame during which collisions may occur.
  251. * The default value of 55 corresponds to the frame byte at the end of the
  252. * standard 512-bit slot time window. If collisions are detected after this
  253. * byte, the late collision event is asserted and transmission of current
  254. * frame is aborted.
  255. * tx_pad_crc:
  256. * Pad and append CRC. If set, the MAC pads all transmitted short frames and
  257. * appends a CRC to every frame regardless of padding requirement.
  258. * tx_pause_time:
  259. * Transmit pause time value. This pause value is used as part of the pause
  260. * frame to be sent when a transmit pause frame is initiated.
  261. * If set to 0 this disables transmission of pause frames.
  262. * preamble_len:
  263. * Length, in bytes, of the preamble field preceding each Ethernet
  264. * start-of-frame delimiter byte. The default value of 0x7 should be used in
  265. * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
  266. * rx_prepend:
  267. * Packet alignment padding length. The specified number of bytes (1-31)
  268. * of zero padding are inserted before the start of each received frame.
  269. * For Ethernet, where optional preamble extraction is enabled, the padding
  270. * appears before the preamble, otherwise the padding precedes the
  271. * layer 2 header.
  272. *
  273. * This structure contains basic dTSEC configuration and must be passed to
  274. * init() function. A default set of configuration values can be
  275. * obtained by calling set_dflts().
  276. */
  277. struct dtsec_cfg {
  278. u16 halfdup_retransmit;
  279. u16 halfdup_coll_window;
  280. bool tx_pad_crc;
  281. u16 tx_pause_time;
  282. bool ptp_tsu_en;
  283. bool ptp_exception_en;
  284. u32 preamble_len;
  285. u32 rx_prepend;
  286. u16 tx_pause_time_extd;
  287. u16 maximum_frame;
  288. u32 non_back_to_back_ipg1;
  289. u32 non_back_to_back_ipg2;
  290. u32 min_ifg_enforcement;
  291. u32 back_to_back_ipg;
  292. };
  293. struct fman_mac {
  294. /* pointer to dTSEC memory mapped registers */
  295. struct dtsec_regs __iomem *regs;
  296. /* MAC address of device */
  297. u64 addr;
  298. /* Ethernet physical interface */
  299. phy_interface_t phy_if;
  300. u16 max_speed;
  301. void *dev_id; /* device cookie used by the exception cbs */
  302. fman_mac_exception_cb *exception_cb;
  303. fman_mac_exception_cb *event_cb;
  304. /* Number of individual addresses in registers for this station */
  305. u8 num_of_ind_addr_in_regs;
  306. /* pointer to driver's global address hash table */
  307. struct eth_hash_t *multicast_addr_hash;
  308. /* pointer to driver's individual address hash table */
  309. struct eth_hash_t *unicast_addr_hash;
  310. u8 mac_id;
  311. u32 exceptions;
  312. bool ptp_tsu_enabled;
  313. bool en_tsu_err_exception;
  314. struct dtsec_cfg *dtsec_drv_param;
  315. void *fm;
  316. struct fman_rev_info fm_rev_info;
  317. bool basex_if;
  318. struct phy_device *tbiphy;
  319. };
  320. static void set_dflts(struct dtsec_cfg *cfg)
  321. {
  322. cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
  323. cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
  324. cfg->tx_pad_crc = true;
  325. cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
  326. /* PHY address 0 is reserved (DPAA RM) */
  327. cfg->rx_prepend = DEFAULT_RX_PREPEND;
  328. cfg->ptp_tsu_en = true;
  329. cfg->ptp_exception_en = true;
  330. cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
  331. cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
  332. cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
  333. cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
  334. cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
  335. cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
  336. cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
  337. }
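/* Illustrative sketch, not part of the original driver: shows how a caller
 * might start from the defaults produced by set_dflts() above and override a
 * single field before handing the configuration to init(). The function name
 * and the value 1522 for maximum_frame are only examples.
 */
static void __maybe_unused example_dtsec_cfg_usage(struct dtsec_cfg *cfg)
{
	set_dflts(cfg);			/* populate the driver defaults */
	cfg->maximum_frame = 1522;	/* then override selected fields */
}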
  338. static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
  339. phy_interface_t iface, u16 iface_speed, u8 *macaddr,
  340. u32 exception_mask, u8 tbi_addr)
  341. {
  342. bool is_rgmii, is_sgmii, is_qsgmii;
  343. int i;
  344. u32 tmp;
  345. /* Soft reset */
  346. iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
  347. iowrite32be(0, &regs->maccfg1);
  348. /* dtsec_id2 */
  349. tmp = ioread32be(&regs->tsec_id2);
  350. /* check RGMII support */
  351. if (iface == PHY_INTERFACE_MODE_RGMII ||
  352. iface == PHY_INTERFACE_MODE_RGMII_ID ||
  353. iface == PHY_INTERFACE_MODE_RGMII_RXID ||
  354. iface == PHY_INTERFACE_MODE_RGMII_TXID ||
  355. iface == PHY_INTERFACE_MODE_RMII)
  356. if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
  357. return -EINVAL;
  358. if (iface == PHY_INTERFACE_MODE_SGMII ||
  359. iface == PHY_INTERFACE_MODE_MII)
  360. if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
  361. return -EINVAL;
  362. is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
  363. iface == PHY_INTERFACE_MODE_RGMII_ID ||
  364. iface == PHY_INTERFACE_MODE_RGMII_RXID ||
  365. iface == PHY_INTERFACE_MODE_RGMII_TXID;
  366. is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
  367. is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
  368. tmp = 0;
  369. if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
  370. tmp |= DTSEC_ECNTRL_GMIIM;
  371. if (is_sgmii)
  372. tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
  373. if (is_qsgmii)
  374. tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
  375. DTSEC_ECNTRL_QSGMIIM);
  376. if (is_rgmii)
  377. tmp |= DTSEC_ECNTRL_RPM;
  378. if (iface_speed == SPEED_100)
  379. tmp |= DTSEC_ECNTRL_R100M;
  380. iowrite32be(tmp, &regs->ecntrl);
  381. tmp = 0;
  382. if (cfg->tx_pause_time)
  383. tmp |= cfg->tx_pause_time;
  384. if (cfg->tx_pause_time_extd)
  385. tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
  386. iowrite32be(tmp, &regs->ptv);
  387. tmp = 0;
  388. tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
  389. /* Accept short frames */
  390. tmp |= RCTRL_RSF;
  391. iowrite32be(tmp, &regs->rctrl);
  392. /* Assign a Phy Address to the TBI (TBIPA).
  393. * Done also in cases where TBI is not selected to avoid conflict with
  394. * the external PHY's Physical address
  395. */
  396. iowrite32be(tbi_addr, &regs->tbipa);
  397. iowrite32be(0, &regs->tmr_ctrl);
  398. if (cfg->ptp_tsu_en) {
  399. tmp = 0;
  400. tmp |= TMR_PEVENT_TSRE;
  401. iowrite32be(tmp, &regs->tmr_pevent);
  402. if (cfg->ptp_exception_en) {
  403. tmp = 0;
  404. tmp |= TMR_PEMASK_TSREEN;
  405. iowrite32be(tmp, &regs->tmr_pemask);
  406. }
  407. }
  408. tmp = 0;
  409. tmp |= MACCFG1_RX_FLOW;
  410. tmp |= MACCFG1_TX_FLOW;
  411. iowrite32be(tmp, &regs->maccfg1);
  412. tmp = 0;
  413. if (iface_speed < SPEED_1000)
  414. tmp |= MACCFG2_NIBBLE_MODE;
  415. else if (iface_speed == SPEED_1000)
  416. tmp |= MACCFG2_BYTE_MODE;
  417. tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
  418. MACCFG2_PREAMBLE_LENGTH_MASK;
  419. if (cfg->tx_pad_crc)
  420. tmp |= MACCFG2_PAD_CRC_EN;
  421. /* Full Duplex */
  422. tmp |= MACCFG2_FULL_DUPLEX;
  423. iowrite32be(tmp, &regs->maccfg2);
  424. tmp = (((cfg->non_back_to_back_ipg1 <<
  425. IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
  426. & IPGIFG_NON_BACK_TO_BACK_IPG_1)
  427. | ((cfg->non_back_to_back_ipg2 <<
  428. IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
  429. & IPGIFG_NON_BACK_TO_BACK_IPG_2)
  430. | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
  431. & IPGIFG_MIN_IFG_ENFORCEMENT)
  432. | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
  433. iowrite32be(tmp, &regs->ipgifg);
  434. tmp = 0;
  435. tmp |= HAFDUP_EXCESS_DEFER;
  436. tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
  437. & HAFDUP_RETRANSMISSION_MAX);
  438. tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
  439. iowrite32be(tmp, &regs->hafdup);
  440. /* Initialize Maximum frame length */
  441. iowrite32be(cfg->maximum_frame, &regs->maxfrm);
  442. iowrite32be(0xffffffff, &regs->cam1);
  443. iowrite32be(0xffffffff, &regs->cam2);
  444. iowrite32be(exception_mask, &regs->imask);
  445. iowrite32be(0xffffffff, &regs->ievent);
  446. tmp = (u32)((macaddr[5] << 24) |
  447. (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
  448. iowrite32be(tmp, &regs->macstnaddr1);
  449. tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
  450. iowrite32be(tmp, &regs->macstnaddr2);
  451. /* HASH */
  452. for (i = 0; i < NUM_OF_HASH_REGS; i++) {
  453. /* Initialize IADDRx */
  454. iowrite32be(0, &regs->igaddr[i]);
  455. /* Initialize GADDRx */
  456. iowrite32be(0, &regs->gaddr[i]);
  457. }
  458. return 0;
  459. }
  460. static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
  461. {
  462. u32 tmp;
  463. tmp = (u32)((adr[5] << 24) |
  464. (adr[4] << 16) | (adr[3] << 8) | adr[2]);
  465. iowrite32be(tmp, &regs->macstnaddr1);
  466. tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
  467. iowrite32be(tmp, &regs->macstnaddr2);
  468. }
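/* Illustrative note, not part of the original driver: for the address
 * 00:04:9f:01:02:03 (adr[0]=0x00 ... adr[5]=0x03) the writes above produce
 * macstnaddr1 = 0x0302019f and macstnaddr2 = 0x04000000, i.e. the address
 * bytes are stored in reversed order across the two registers.
 */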
  469. static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
  470. bool enable)
  471. {
  472. int reg_idx = (bucket >> 5) & 0xf;
  473. int bit_idx = bucket & 0x1f;
  474. u32 bit_mask = 0x80000000 >> bit_idx;
  475. u32 __iomem *reg;
  476. if (reg_idx > 7)
  477. reg = &regs->gaddr[reg_idx - 8];
  478. else
  479. reg = &regs->igaddr[reg_idx];
  480. if (enable)
  481. iowrite32be(ioread32be(reg) | bit_mask, reg);
  482. else
  483. iowrite32be(ioread32be(reg) & (~bit_mask), reg);
  484. }
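/* Illustrative note, not part of the original driver: the 9-bit bucket index
 * selects one bit out of the sixteen 32-bit hash registers (igaddr[0..7]
 * followed by gaddr[0..7]). For example, bucket 0x123 (291) gives
 * reg_idx = 9 and bit_idx = 3, so the code above sets mask 0x10000000 in
 * gaddr[1].
 */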
  485. static int check_init_parameters(struct fman_mac *dtsec)
  486. {
  487. if (dtsec->max_speed >= SPEED_10000) {
  488. pr_err("1G MAC driver supports 1G or lower speeds\n");
  489. return -EINVAL;
  490. }
  491. if (dtsec->addr == 0) {
  492. pr_err("Ethernet MAC Must have a valid MAC Address\n");
  493. return -EINVAL;
  494. }
  495. if ((dtsec->dtsec_drv_param)->rx_prepend >
  496. MAX_PACKET_ALIGNMENT) {
  497. pr_err("packetAlignmentPadding can't be > than %d\n",
  498. MAX_PACKET_ALIGNMENT);
  499. return -EINVAL;
  500. }
  501. if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
  502. MAX_INTER_PACKET_GAP) ||
  503. ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
  504. MAX_INTER_PACKET_GAP) ||
  505. ((dtsec->dtsec_drv_param)->back_to_back_ipg >
  506. MAX_INTER_PACKET_GAP)) {
  507. pr_err("Inter packet gap can't be greater than %d\n",
  508. MAX_INTER_PACKET_GAP);
  509. return -EINVAL;
  510. }
  511. if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
  512. MAX_RETRANSMISSION) {
  513. pr_err("maxRetransmission can't be greater than %d\n",
  514. MAX_RETRANSMISSION);
  515. return -EINVAL;
  516. }
  517. if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
  518. MAX_COLLISION_WINDOW) {
  519. pr_err("collisionWindow can't be greater than %d\n",
  520. MAX_COLLISION_WINDOW);
  521. return -EINVAL;
  522. /* If Auto negotiation process is disabled, need to set up the PHY
  523. * using the MII Management Interface
  524. */
  525. }
  526. if (!dtsec->exception_cb) {
  527. pr_err("uninitialized exception_cb\n");
  528. return -EINVAL;
  529. }
  530. if (!dtsec->event_cb) {
  531. pr_err("uninitialized event_cb\n");
  532. return -EINVAL;
  533. }
  534. return 0;
  535. }
  536. static int get_exception_flag(enum fman_mac_exceptions exception)
  537. {
  538. u32 bit_mask;
  539. switch (exception) {
  540. case FM_MAC_EX_1G_BAB_RX:
  541. bit_mask = DTSEC_IMASK_BREN;
  542. break;
  543. case FM_MAC_EX_1G_RX_CTL:
  544. bit_mask = DTSEC_IMASK_RXCEN;
  545. break;
  546. case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
  547. bit_mask = DTSEC_IMASK_GTSCEN;
  548. break;
  549. case FM_MAC_EX_1G_BAB_TX:
  550. bit_mask = DTSEC_IMASK_BTEN;
  551. break;
  552. case FM_MAC_EX_1G_TX_CTL:
  553. bit_mask = DTSEC_IMASK_TXCEN;
  554. break;
  555. case FM_MAC_EX_1G_TX_ERR:
  556. bit_mask = DTSEC_IMASK_TXEEN;
  557. break;
  558. case FM_MAC_EX_1G_LATE_COL:
  559. bit_mask = DTSEC_IMASK_LCEN;
  560. break;
  561. case FM_MAC_EX_1G_COL_RET_LMT:
  562. bit_mask = DTSEC_IMASK_CRLEN;
  563. break;
  564. case FM_MAC_EX_1G_TX_FIFO_UNDRN:
  565. bit_mask = DTSEC_IMASK_XFUNEN;
  566. break;
  567. case FM_MAC_EX_1G_MAG_PCKT:
  568. bit_mask = DTSEC_IMASK_MAGEN;
  569. break;
  570. case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
  571. bit_mask = DTSEC_IMASK_MMRDEN;
  572. break;
  573. case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
  574. bit_mask = DTSEC_IMASK_MMWREN;
  575. break;
  576. case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
  577. bit_mask = DTSEC_IMASK_GRSCEN;
  578. break;
  579. case FM_MAC_EX_1G_DATA_ERR:
  580. bit_mask = DTSEC_IMASK_TDPEEN;
  581. break;
  582. case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
  583. bit_mask = DTSEC_IMASK_MSROEN;
  584. break;
  585. default:
  586. bit_mask = 0;
  587. break;
  588. }
  589. return bit_mask;
  590. }
  591. static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
  592. {
  593. /* dtsec_drv_param is freed and set to NULL at the end of dtsec_init(),
 * so a NULL pointer here means initialization is done
 */
  594. if (!dtsec_drv_params)
  595. return true;
  596. return false;
  597. }
  598. static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
  599. {
  600. struct dtsec_regs __iomem *regs = dtsec->regs;
  601. if (is_init_done(dtsec->dtsec_drv_param))
  602. return 0;
  603. return (u16)ioread32be(&regs->maxfrm);
  604. }
  605. static void dtsec_isr(void *handle)
  606. {
  607. struct fman_mac *dtsec = (struct fman_mac *)handle;
  608. struct dtsec_regs __iomem *regs = dtsec->regs;
  609. u32 event;
  610. /* do not handle MDIO events */
  611. event = ioread32be(&regs->ievent) &
  612. (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
  613. event &= ioread32be(&regs->imask);
  614. iowrite32be(event, &regs->ievent);
  615. if (event & DTSEC_IMASK_BREN)
  616. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
  617. if (event & DTSEC_IMASK_RXCEN)
  618. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
  619. if (event & DTSEC_IMASK_GTSCEN)
  620. dtsec->exception_cb(dtsec->dev_id,
  621. FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
  622. if (event & DTSEC_IMASK_BTEN)
  623. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
  624. if (event & DTSEC_IMASK_TXCEN)
  625. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
  626. if (event & DTSEC_IMASK_TXEEN)
  627. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
  628. if (event & DTSEC_IMASK_LCEN)
  629. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
  630. if (event & DTSEC_IMASK_CRLEN)
  631. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
  632. if (event & DTSEC_IMASK_XFUNEN) {
  633. /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
  634. if (dtsec->fm_rev_info.major == 2) {
  635. u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
  636. /* a. Write 0x00E0_0C00 to DTSEC_ID
  637. * This is a read only register
  638. * b. Read and save the value of TPKT
  639. */
  640. tpkt1 = ioread32be(&regs->tpkt);
  641. /* c. Read the register at dTSEC address offset 0x32C */
  642. tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
  643. /* d. Compare bits [9:15] to bits [25:31] of the
  644. * register at address offset 0x32C.
  645. */
  646. if ((tmp_reg1 & 0x007F0000) !=
  647. (tmp_reg1 & 0x0000007F)) {
  648. /* If they are not equal, save the value of
  649. * this register and wait for at least
  650. * MAXFRM*16 ns
  651. */
  652. usleep_range((u32)(min
  653. (dtsec_get_max_frame_length(dtsec) *
  654. 16 / 1000, 1)), (u32)
  655. (min(dtsec_get_max_frame_length
  656. (dtsec) * 16 / 1000, 1) + 1));
  657. }
  658. /* e. Read and save TPKT again and read the register
  659. * at dTSEC address offset 0x32C again
  660. */
  661. tpkt2 = ioread32be(&regs->tpkt);
  662. tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
  663. /* f. Compare the value of TPKT saved in step b to
  664. * value read in step e. Also compare bits [9:15] of
  665. * the register at offset 0x32C saved in step d to the
  666. * value of bits [9:15] saved in step e. If the two
  667. * registers values are unchanged, then the transmit
  668. * portion of the dTSEC controller is locked up and
  669. * the user should proceed to the recover sequence.
  670. */
  671. if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
  672. (tmp_reg2 & 0x007F0000))) {
  673. /* recover sequence */
  674. /* a.Write a 1 to RCTRL[GRS] */
  675. iowrite32be(ioread32be(&regs->rctrl) |
  676. RCTRL_GRS, &regs->rctrl);
  677. /* b.Wait until IEVENT[GRSC]=1, or at least
  678. * 100 us has elapsed.
  679. */
  680. for (i = 0; i < 100; i++) {
  681. if (ioread32be(&regs->ievent) &
  682. DTSEC_IMASK_GRSCEN)
  683. break;
  684. udelay(1);
  685. }
  686. if (ioread32be(&regs->ievent) &
  687. DTSEC_IMASK_GRSCEN)
  688. iowrite32be(DTSEC_IMASK_GRSCEN,
  689. &regs->ievent);
  690. else
  691. pr_debug("Rx lockup due to Tx lockup\n");
  692. /* c.Write a 1 to bit n of FM_RSTC
  693. * (offset 0x0CC of FPM)
  694. */
  695. fman_reset_mac(dtsec->fm, dtsec->mac_id);
  696. /* d.Wait 4 Tx clocks (32 ns) */
  697. udelay(1);
  698. /* e.Write a 0 to bit n of FM_RSTC. */
  699. /* cleared by FMAN
  700. */
  701. }
  702. }
  703. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
  704. }
  705. if (event & DTSEC_IMASK_MAGEN)
  706. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
  707. if (event & DTSEC_IMASK_GRSCEN)
  708. dtsec->exception_cb(dtsec->dev_id,
  709. FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
  710. if (event & DTSEC_IMASK_TDPEEN)
  711. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
  712. if (event & DTSEC_IMASK_RDPEEN)
  713. dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
  714. /* masked interrupts */
  715. WARN_ON(event & DTSEC_IMASK_ABRTEN);
  716. WARN_ON(event & DTSEC_IMASK_IFERREN);
  717. }
  718. static void dtsec_1588_isr(void *handle)
  719. {
  720. struct fman_mac *dtsec = (struct fman_mac *)handle;
  721. struct dtsec_regs __iomem *regs = dtsec->regs;
  722. u32 event;
  723. if (dtsec->ptp_tsu_enabled) {
  724. event = ioread32be(&regs->tmr_pevent);
  725. event &= ioread32be(&regs->tmr_pemask);
  726. if (event) {
  727. iowrite32be(event, &regs->tmr_pevent);
  728. WARN_ON(event & TMR_PEVENT_TSRE);
  729. dtsec->exception_cb(dtsec->dev_id,
  730. FM_MAC_EX_1G_1588_TS_RX_ERR);
  731. }
  732. }
  733. }
  734. static void free_init_resources(struct fman_mac *dtsec)
  735. {
  736. fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  737. FMAN_INTR_TYPE_ERR);
  738. fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  739. FMAN_INTR_TYPE_NORMAL);
  740. /* release the driver's group hash table */
  741. free_hash_table(dtsec->multicast_addr_hash);
  742. dtsec->multicast_addr_hash = NULL;
  743. /* release the driver's individual hash table */
  744. free_hash_table(dtsec->unicast_addr_hash);
  745. dtsec->unicast_addr_hash = NULL;
  746. }
  747. int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
  748. {
  749. if (is_init_done(dtsec->dtsec_drv_param))
  750. return -EINVAL;
  751. dtsec->dtsec_drv_param->maximum_frame = new_val;
  752. return 0;
  753. }
  754. int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
  755. {
  756. if (is_init_done(dtsec->dtsec_drv_param))
  757. return -EINVAL;
  758. dtsec->dtsec_drv_param->tx_pad_crc = new_val;
  759. return 0;
  760. }
  761. static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
  762. {
  763. struct dtsec_regs __iomem *regs = dtsec->regs;
  764. if (mode & COMM_MODE_TX)
  765. iowrite32be(ioread32be(&regs->tctrl) &
  766. ~TCTRL_GTS, &regs->tctrl);
  767. if (mode & COMM_MODE_RX)
  768. iowrite32be(ioread32be(&regs->rctrl) &
  769. ~RCTRL_GRS, &regs->rctrl);
  770. }
  771. static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
  772. {
  773. struct dtsec_regs __iomem *regs = dtsec->regs;
  774. u32 tmp;
  775. /* Graceful stop - Assert the graceful Rx stop bit */
  776. if (mode & COMM_MODE_RX) {
  777. tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
  778. iowrite32be(tmp, &regs->rctrl);
  779. if (dtsec->fm_rev_info.major == 2) {
  780. /* Workaround for dTSEC Errata A002 */
  781. usleep_range(100, 200);
  782. } else {
  783. /* Workaround for dTSEC Errata A004839 */
  784. usleep_range(10, 50);
  785. }
  786. }
  787. /* Graceful stop - Assert the graceful Tx stop bit */
  788. if (mode & COMM_MODE_TX) {
  789. if (dtsec->fm_rev_info.major == 2) {
  790. /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
  791. pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
  792. } else {
  793. tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
  794. iowrite32be(tmp, &regs->tctrl);
  795. /* Workaround for dTSEC Errata A0012, A0014 */
  796. usleep_range(10, 50);
  797. }
  798. }
  799. }
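/* Illustrative note, not part of the original driver: several of the setters
 * below (dtsec_set_tx_pause_frames(), dtsec_accept_rx_pause_frames(),
 * dtsec_modify_mac_address(), dtsec_adjust_link()) share one pattern: read
 * RCTRL/TCTRL to see which directions are currently running, call
 * graceful_stop() for those directions, update the registers, then call
 * graceful_start() to resume traffic.
 */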
  800. int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
  801. {
  802. struct dtsec_regs __iomem *regs = dtsec->regs;
  803. u32 tmp;
  804. if (!is_init_done(dtsec->dtsec_drv_param))
  805. return -EINVAL;
  806. /* Enable */
  807. tmp = ioread32be(&regs->maccfg1);
  808. if (mode & COMM_MODE_RX)
  809. tmp |= MACCFG1_RX_EN;
  810. if (mode & COMM_MODE_TX)
  811. tmp |= MACCFG1_TX_EN;
  812. iowrite32be(tmp, &regs->maccfg1);
  813. /* Graceful start - clear the graceful Rx/Tx stop bit */
  814. graceful_start(dtsec, mode);
  815. return 0;
  816. }
  817. int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
  818. {
  819. struct dtsec_regs __iomem *regs = dtsec->regs;
  820. u32 tmp;
  821. if (!is_init_done(dtsec->dtsec_drv_param))
  822. return -EINVAL;
  823. /* Graceful stop - Assert the graceful Rx/Tx stop bit */
  824. graceful_stop(dtsec, mode);
  825. tmp = ioread32be(&regs->maccfg1);
  826. if (mode & COMM_MODE_RX)
  827. tmp &= ~MACCFG1_RX_EN;
  828. if (mode & COMM_MODE_TX)
  829. tmp &= ~MACCFG1_TX_EN;
  830. iowrite32be(tmp, &regs->maccfg1);
  831. return 0;
  832. }
  833. int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
  834. u8 __maybe_unused priority,
  835. u16 pause_time, u16 __maybe_unused thresh_time)
  836. {
  837. struct dtsec_regs __iomem *regs = dtsec->regs;
  838. enum comm_mode mode = COMM_MODE_NONE;
  839. u32 ptv = 0;
  840. if (!is_init_done(dtsec->dtsec_drv_param))
  841. return -EINVAL;
  842. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  843. mode |= COMM_MODE_RX;
  844. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  845. mode |= COMM_MODE_TX;
  846. graceful_stop(dtsec, mode);
  847. if (pause_time) {
  848. /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
  849. if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
  850. pr_warn("pause-time: %d illegal.Should be > 320\n",
  851. pause_time);
  852. return -EINVAL;
  853. }
  854. ptv = ioread32be(&regs->ptv);
  855. ptv &= PTV_PTE_MASK;
  856. ptv |= pause_time & PTV_PT_MASK;
  857. iowrite32be(ptv, &regs->ptv);
  858. /* trigger the transmission of a flow-control pause frame */
  859. iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
  860. &regs->maccfg1);
  861. } else
  862. iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
  863. &regs->maccfg1);
  864. graceful_start(dtsec, mode);
  865. return 0;
  866. }
  867. int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
  868. {
  869. struct dtsec_regs __iomem *regs = dtsec->regs;
  870. enum comm_mode mode = COMM_MODE_NONE;
  871. u32 tmp;
  872. if (!is_init_done(dtsec->dtsec_drv_param))
  873. return -EINVAL;
  874. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  875. mode |= COMM_MODE_RX;
  876. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  877. mode |= COMM_MODE_TX;
  878. graceful_stop(dtsec, mode);
  879. tmp = ioread32be(&regs->maccfg1);
  880. if (en)
  881. tmp |= MACCFG1_RX_FLOW;
  882. else
  883. tmp &= ~MACCFG1_RX_FLOW;
  884. iowrite32be(tmp, &regs->maccfg1);
  885. graceful_start(dtsec, mode);
  886. return 0;
  887. }
  888. int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
  889. {
  890. struct dtsec_regs __iomem *regs = dtsec->regs;
  891. enum comm_mode mode = COMM_MODE_NONE;
  892. if (!is_init_done(dtsec->dtsec_drv_param))
  893. return -EINVAL;
  894. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  895. mode |= COMM_MODE_RX;
  896. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  897. mode |= COMM_MODE_TX;
  898. graceful_stop(dtsec, mode);
  899. /* Initialize MAC Station Address registers (1 & 2)
  900. * Station address has to be swapped (big endian to little endian)
  901. */
  902. dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
  903. set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
  904. graceful_start(dtsec, mode);
  905. return 0;
  906. }
  907. int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  908. {
  909. struct dtsec_regs __iomem *regs = dtsec->regs;
  910. struct eth_hash_entry *hash_entry;
  911. u64 addr;
  912. s32 bucket;
  913. u32 crc = 0xFFFFFFFF;
  914. bool mcast, ghtx;
  915. if (!is_init_done(dtsec->dtsec_drv_param))
  916. return -EINVAL;
  917. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  918. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  919. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  920. /* Cannot handle unicast mac addr when GHTX is on */
  921. if (ghtx && !mcast) {
  922. pr_err("Could not compute hash bucket\n");
  923. return -EINVAL;
  924. }
  925. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  926. crc = bitrev32(crc);
  927. /* Considering the 9 highest order bits of the CRC, H[8:0]:
  928. * if ghtx = 0, H[8:6] (highest order 3 bits) identify the hash register
  929. * and H[5:1] (next 5 bits) identify the hash bit;
  930. * if ghtx = 1, H[8:5] (highest order 4 bits) identify the hash register
  931. * and H[4:0] (next 5 bits) identify the hash bit.
  932. *
  933. * In the bucket index output, the low 5 bits identify the hash register
  934. * bit, while the higher 4 bits identify the hash register.
  935. */
  936. if (ghtx) {
  937. bucket = (s32)((crc >> 23) & 0x1ff);
  938. } else {
  939. bucket = (s32)((crc >> 24) & 0xff);
  940. /* if !ghtx and mcast the bit must be set in gaddr instead of
  941. *igaddr.
  942. */
  943. if (mcast)
  944. bucket += 0x100;
  945. }
  946. set_bucket(dtsec->regs, bucket, true);
  947. /* Create element to be added to the driver hash table */
  948. hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
  949. if (!hash_entry)
  950. return -ENOMEM;
  951. hash_entry->addr = addr;
  952. INIT_LIST_HEAD(&hash_entry->node);
  953. if (addr & MAC_GROUP_ADDRESS)
  954. /* Group Address */
  955. list_add_tail(&hash_entry->node,
  956. &dtsec->multicast_addr_hash->lsts[bucket]);
  957. else
  958. list_add_tail(&hash_entry->node,
  959. &dtsec->unicast_addr_hash->lsts[bucket]);
  960. return 0;
  961. }
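/* Illustrative sketch, not part of the original driver: the bucket selection
 * used by dtsec_add_hash_mac_address() above (and dtsec_del_hash_mac_address()
 * below), pulled out as a helper to make the bit-slicing comment concrete.
 * Assumes crc32_le() and bitrev32() exactly as used above; the helper name is
 * hypothetical.
 */
static inline s32 __maybe_unused example_dtsec_hash_bucket(const u8 *mac,
							    bool ghtx,
							    bool mcast)
{
	u32 crc = bitrev32(crc32_le(0xFFFFFFFF, mac, ETH_ALEN));

	if (ghtx)
		/* GHTX set: top 9 bits of the reflected CRC */
		return (s32)((crc >> 23) & 0x1ff);
	/* GHTX clear: top 8 bits; multicast entries land in gaddr[] (+0x100) */
	return (s32)(((crc >> 24) & 0xff) + (mcast ? 0x100 : 0));
}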
  962. int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  963. {
  964. struct dtsec_regs __iomem *regs = dtsec->regs;
  965. struct list_head *pos;
  966. struct eth_hash_entry *hash_entry = NULL;
  967. u64 addr;
  968. s32 bucket;
  969. u32 crc = 0xFFFFFFFF;
  970. bool mcast, ghtx;
  971. if (!is_init_done(dtsec->dtsec_drv_param))
  972. return -EINVAL;
  973. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  974. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  975. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  976. /* Cannot handle unicast mac addr when GHTX is on */
  977. if (ghtx && !mcast) {
  978. pr_err("Could not compute hash bucket\n");
  979. return -EINVAL;
  980. }
  981. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  982. crc = bitrev32(crc);
  983. if (ghtx) {
  984. bucket = (s32)((crc >> 23) & 0x1ff);
  985. } else {
  986. bucket = (s32)((crc >> 24) & 0xff);
  987. /* if !ghtx and mcast the bit must be set
  988. * in gaddr instead of igaddr.
  989. */
  990. if (mcast)
  991. bucket += 0x100;
  992. }
  993. if (addr & MAC_GROUP_ADDRESS) {
  994. /* Group Address */
  995. list_for_each(pos,
  996. &dtsec->multicast_addr_hash->lsts[bucket]) {
  997. hash_entry = ETH_HASH_ENTRY_OBJ(pos);
  998. if (hash_entry->addr == addr) {
  999. list_del_init(&hash_entry->node);
  1000. kfree(hash_entry);
  1001. break;
  1002. }
  1003. }
  1004. if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
  1005. set_bucket(dtsec->regs, bucket, false);
  1006. } else {
  1007. /* Individual Address */
  1008. list_for_each(pos,
  1009. &dtsec->unicast_addr_hash->lsts[bucket]) {
  1010. hash_entry = ETH_HASH_ENTRY_OBJ(pos);
  1011. if (hash_entry->addr == addr) {
  1012. list_del_init(&hash_entry->node);
  1013. kfree(hash_entry);
  1014. break;
  1015. }
  1016. }
  1017. if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
  1018. set_bucket(dtsec->regs, bucket, false);
  1019. }
  1020. /* address does not exist */
  1021. WARN_ON(!hash_entry);
  1022. return 0;
  1023. }
  1024. int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
  1025. {
  1026. struct dtsec_regs __iomem *regs = dtsec->regs;
  1027. u32 tmp;
  1028. if (!is_init_done(dtsec->dtsec_drv_param))
  1029. return -EINVAL;
  1030. /* Set unicast promiscuous */
  1031. tmp = ioread32be(&regs->rctrl);
  1032. if (new_val)
  1033. tmp |= RCTRL_UPROM;
  1034. else
  1035. tmp &= ~RCTRL_UPROM;
  1036. iowrite32be(tmp, &regs->rctrl);
  1037. /* Set multicast promiscuous */
  1038. tmp = ioread32be(&regs->rctrl);
  1039. if (new_val)
  1040. tmp |= RCTRL_MPROM;
  1041. else
  1042. tmp &= ~RCTRL_MPROM;
  1043. iowrite32be(tmp, &regs->rctrl);
  1044. return 0;
  1045. }
  1046. int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
  1047. {
  1048. struct dtsec_regs __iomem *regs = dtsec->regs;
  1049. enum comm_mode mode = COMM_MODE_NONE;
  1050. u32 tmp;
  1051. if (!is_init_done(dtsec->dtsec_drv_param))
  1052. return -EINVAL;
  1053. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  1054. mode |= COMM_MODE_RX;
  1055. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  1056. mode |= COMM_MODE_TX;
  1057. graceful_stop(dtsec, mode);
  1058. tmp = ioread32be(&regs->maccfg2);
  1059. /* Full Duplex */
  1060. tmp |= MACCFG2_FULL_DUPLEX;
  1061. tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
  1062. if (speed < SPEED_1000)
  1063. tmp |= MACCFG2_NIBBLE_MODE;
  1064. else if (speed == SPEED_1000)
  1065. tmp |= MACCFG2_BYTE_MODE;
  1066. iowrite32be(tmp, &regs->maccfg2);
  1067. tmp = ioread32be(&regs->ecntrl);
  1068. if (speed == SPEED_100)
  1069. tmp |= DTSEC_ECNTRL_R100M;
  1070. else
  1071. tmp &= ~DTSEC_ECNTRL_R100M;
  1072. iowrite32be(tmp, &regs->ecntrl);
  1073. graceful_start(dtsec, mode);
  1074. return 0;
  1075. }
  1076. int dtsec_restart_autoneg(struct fman_mac *dtsec)
  1077. {
  1078. u16 tmp_reg16;
  1079. if (!is_init_done(dtsec->dtsec_drv_param))
  1080. return -EINVAL;
  1081. tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
  1082. tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
  1083. tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
  1084. BMCR_FULLDPLX | BMCR_SPEED1000);
  1085. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1086. return 0;
  1087. }
  1088. int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
  1089. {
  1090. struct dtsec_regs __iomem *regs = dtsec->regs;
  1091. if (!is_init_done(dtsec->dtsec_drv_param))
  1092. return -EINVAL;
  1093. *mac_version = ioread32be(&regs->tsec_id);
  1094. return 0;
  1095. }
  1096. int dtsec_set_exception(struct fman_mac *dtsec,
  1097. enum fman_mac_exceptions exception, bool enable)
  1098. {
  1099. struct dtsec_regs __iomem *regs = dtsec->regs;
  1100. u32 bit_mask = 0;
  1101. if (!is_init_done(dtsec->dtsec_drv_param))
  1102. return -EINVAL;
  1103. if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
  1104. bit_mask = get_exception_flag(exception);
  1105. if (bit_mask) {
  1106. if (enable)
  1107. dtsec->exceptions |= bit_mask;
  1108. else
  1109. dtsec->exceptions &= ~bit_mask;
  1110. } else {
  1111. pr_err("Undefined exception\n");
  1112. return -EINVAL;
  1113. }
  1114. if (enable)
  1115. iowrite32be(ioread32be(&regs->imask) | bit_mask,
  1116. &regs->imask);
  1117. else
  1118. iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
  1119. &regs->imask);
  1120. } else {
  1121. if (!dtsec->ptp_tsu_enabled) {
  1122. pr_err("Exception valid for 1588 only\n");
  1123. return -EINVAL;
  1124. }
  1125. switch (exception) {
  1126. case FM_MAC_EX_1G_1588_TS_RX_ERR:
  1127. if (enable) {
  1128. dtsec->en_tsu_err_exception = true;
  1129. iowrite32be(ioread32be(&regs->tmr_pemask) |
  1130. TMR_PEMASK_TSREEN,
  1131. &regs->tmr_pemask);
  1132. } else {
  1133. dtsec->en_tsu_err_exception = false;
  1134. iowrite32be(ioread32be(&regs->tmr_pemask) &
  1135. ~TMR_PEMASK_TSREEN,
  1136. &regs->tmr_pemask);
  1137. }
  1138. break;
  1139. default:
  1140. pr_err("Undefined exception\n");
  1141. return -EINVAL;
  1142. }
  1143. }
  1144. return 0;
  1145. }
  1146. int dtsec_init(struct fman_mac *dtsec)
  1147. {
  1148. struct dtsec_regs __iomem *regs = dtsec->regs;
  1149. struct dtsec_cfg *dtsec_drv_param;
  1150. int err;
  1151. u16 max_frm_ln;
  1152. enet_addr_t eth_addr;
  1153. if (is_init_done(dtsec->dtsec_drv_param))
  1154. return -EINVAL;
  1155. if (DEFAULT_RESET_ON_INIT &&
  1156. (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
  1157. pr_err("Can't reset MAC!\n");
  1158. return -EINVAL;
  1159. }
  1160. err = check_init_parameters(dtsec);
  1161. if (err)
  1162. return err;
  1163. dtsec_drv_param = dtsec->dtsec_drv_param;
  1164. MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
  1165. err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
  1166. dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
  1167. dtsec->tbiphy->mdio.addr);
  1168. if (err) {
  1169. free_init_resources(dtsec);
  1170. pr_err("DTSEC version doesn't support this i/f mode\n");
  1171. return err;
  1172. }
  1173. if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
  1174. u16 tmp_reg16;
  1175. /* Configure the TBI PHY Control Register */
  1176. tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
  1177. phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
  1178. tmp_reg16 = TBICON_CLK_SELECT;
  1179. phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
  1180. tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
  1181. BMCR_FULLDPLX | BMCR_SPEED1000);
  1182. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1183. if (dtsec->basex_if)
  1184. tmp_reg16 = TBIANA_1000X;
  1185. else
  1186. tmp_reg16 = TBIANA_SGMII;
  1187. phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
  1188. tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
  1189. BMCR_FULLDPLX | BMCR_SPEED1000);
  1190. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1191. }
  1192. /* Max Frame Length */
  1193. max_frm_ln = (u16)ioread32be(&regs->maxfrm);
  1194. err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
  1195. if (err) {
  1196. pr_err("Setting max frame length failed\n");
  1197. free_init_resources(dtsec);
  1198. return -EINVAL;
  1199. }
  1200. dtsec->multicast_addr_hash =
  1201. alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
  1202. if (!dtsec->multicast_addr_hash) {
  1203. free_init_resources(dtsec);
  1204. pr_err("MC hash table is failed\n");
  1205. return -ENOMEM;
  1206. }
  1207. dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
  1208. if (!dtsec->unicast_addr_hash) {
  1209. free_init_resources(dtsec);
  1210. pr_err("UC hash table is failed\n");
  1211. return -ENOMEM;
  1212. }
  1213. /* register err intr handler for dtsec to FPM (err) */
  1214. fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  1215. FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
  1216. /* register 1588 intr handler for TMR to FPM (normal) */
  1217. fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  1218. FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
  1219. kfree(dtsec_drv_param);
  1220. dtsec->dtsec_drv_param = NULL;
  1221. return 0;
  1222. }
  1223. int dtsec_free(struct fman_mac *dtsec)
  1224. {
  1225. free_init_resources(dtsec);
  1226. kfree(dtsec->dtsec_drv_param);
  1227. dtsec->dtsec_drv_param = NULL;
  1228. kfree(dtsec);
  1229. return 0;
  1230. }
  1231. struct fman_mac *dtsec_config(struct fman_mac_params *params)
  1232. {
  1233. struct fman_mac *dtsec;
  1234. struct dtsec_cfg *dtsec_drv_param;
  1235. void __iomem *base_addr;
  1236. base_addr = params->base_addr;
  1237. /* allocate memory for the dTSEC data structure */
  1238. dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
  1239. if (!dtsec)
  1240. return NULL;
  1241. /* allocate memory for the d_tsec driver parameters data structure. */
  1242. dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
  1243. if (!dtsec_drv_param)
  1244. goto err_dtsec;
  1245. /* Plant parameter structure pointer */
  1246. dtsec->dtsec_drv_param = dtsec_drv_param;
  1247. set_dflts(dtsec_drv_param);
  1248. dtsec->regs = base_addr;
  1249. dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
  1250. dtsec->max_speed = params->max_speed;
  1251. dtsec->phy_if = params->phy_if;
  1252. dtsec->mac_id = params->mac_id;
  1253. dtsec->exceptions = (DTSEC_IMASK_BREN |
  1254. DTSEC_IMASK_RXCEN |
  1255. DTSEC_IMASK_BTEN |
  1256. DTSEC_IMASK_TXCEN |
  1257. DTSEC_IMASK_TXEEN |
  1258. DTSEC_IMASK_ABRTEN |
  1259. DTSEC_IMASK_LCEN |
  1260. DTSEC_IMASK_CRLEN |
  1261. DTSEC_IMASK_XFUNEN |
  1262. DTSEC_IMASK_IFERREN |
  1263. DTSEC_IMASK_MAGEN |
  1264. DTSEC_IMASK_TDPEEN |
  1265. DTSEC_IMASK_RDPEEN);
  1266. dtsec->exception_cb = params->exception_cb;
  1267. dtsec->event_cb = params->event_cb;
  1268. dtsec->dev_id = params->dev_id;
  1269. dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
  1270. dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
  1271. dtsec->fm = params->fm;
  1272. dtsec->basex_if = params->basex_if;
  1273. if (!params->internal_phy_node) {
  1274. pr_err("TBI PHY node is not available\n");
  1275. goto err_dtsec_drv_param;
  1276. }
  1277. dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
  1278. if (!dtsec->tbiphy) {
  1279. pr_err("of_phy_find_device (TBI PHY) failed\n");
  1280. goto err_dtsec_drv_param;
  1281. }
  1282. put_device(&dtsec->tbiphy->mdio.dev);
  1283. /* Save FMan revision */
  1284. fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
  1285. return dtsec;
  1286. err_dtsec_drv_param:
  1287. kfree(dtsec_drv_param);
  1288. err_dtsec:
  1289. kfree(dtsec);
  1290. return NULL;
  1291. }