fman_dtsec.c

  1. /*
  2. * Copyright 2008-2015 Freescale Semiconductor Inc.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions are met:
  6. * * Redistributions of source code must retain the above copyright
  7. * notice, this list of conditions and the following disclaimer.
  8. * * Redistributions in binary form must reproduce the above copyright
  9. * notice, this list of conditions and the following disclaimer in the
  10. * documentation and/or other materials provided with the distribution.
  11. * * Neither the name of Freescale Semiconductor nor the
  12. * names of its contributors may be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. *
  16. * ALTERNATIVELY, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") as published by the Free Software
  18. * Foundation, either version 2 of that License or (at your option) any
  19. * later version.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  22. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  23. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  24. * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  25. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  26. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  29. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  30. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33. #include "fman_dtsec.h"
  34. #include "fman.h"
  35. #include <linux/slab.h>
  36. #include <linux/bitrev.h>
  37. #include <linux/io.h>
  38. #include <linux/delay.h>
  39. #include <linux/phy.h>
  40. #include <linux/crc32.h>
  41. #include <linux/of_mdio.h>
  42. #include <linux/mii.h>
  43. /* TBI register addresses */
  44. #define MII_TBICON 0x11
  45. /* TBICON register bit fields */
  46. #define TBICON_SOFT_RESET 0x8000 /* Soft reset */
  47. #define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
  48. #define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
  49. #define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
  50. #define TBICON_CLK_SELECT 0x0020 /* Clock select */
  51. #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
  52. #define TBIANA_SGMII 0x4001
  53. #define TBIANA_1000X 0x01a0
  54. /* Interrupt Mask Register (IMASK) */
  55. #define DTSEC_IMASK_BREN 0x80000000
  56. #define DTSEC_IMASK_RXCEN 0x40000000
  57. #define DTSEC_IMASK_MSROEN 0x04000000
  58. #define DTSEC_IMASK_GTSCEN 0x02000000
  59. #define DTSEC_IMASK_BTEN 0x01000000
  60. #define DTSEC_IMASK_TXCEN 0x00800000
  61. #define DTSEC_IMASK_TXEEN 0x00400000
  62. #define DTSEC_IMASK_LCEN 0x00040000
  63. #define DTSEC_IMASK_CRLEN 0x00020000
  64. #define DTSEC_IMASK_XFUNEN 0x00010000
  65. #define DTSEC_IMASK_ABRTEN 0x00008000
  66. #define DTSEC_IMASK_IFERREN 0x00004000
  67. #define DTSEC_IMASK_MAGEN 0x00000800
  68. #define DTSEC_IMASK_MMRDEN 0x00000400
  69. #define DTSEC_IMASK_MMWREN 0x00000200
  70. #define DTSEC_IMASK_GRSCEN 0x00000100
  71. #define DTSEC_IMASK_TDPEEN 0x00000002
  72. #define DTSEC_IMASK_RDPEEN 0x00000001
  73. #define DTSEC_EVENTS_MASK \
  74. ((u32)(DTSEC_IMASK_BREN | \
  75. DTSEC_IMASK_RXCEN | \
  76. DTSEC_IMASK_BTEN | \
  77. DTSEC_IMASK_TXCEN | \
  78. DTSEC_IMASK_TXEEN | \
  79. DTSEC_IMASK_ABRTEN | \
  80. DTSEC_IMASK_LCEN | \
  81. DTSEC_IMASK_CRLEN | \
  82. DTSEC_IMASK_XFUNEN | \
  83. DTSEC_IMASK_IFERREN | \
  84. DTSEC_IMASK_MAGEN | \
  85. DTSEC_IMASK_TDPEEN | \
  86. DTSEC_IMASK_RDPEEN))
  87. /* dtsec timestamp event bits */
  88. #define TMR_PEMASK_TSREEN 0x00010000
  89. #define TMR_PEVENT_TSRE 0x00010000
  90. /* Group address bit indication */
  91. #define MAC_GROUP_ADDRESS 0x0000010000000000ULL
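/* Bit 40 of the 64-bit packed address; assuming ENET_ADDR_TO_UINT64() places
 * octet 0 in bits 47:40, this is the I/G (multicast) bit of the first octet.
 */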
  92. /* Defaults */
  93. #define DEFAULT_HALFDUP_RETRANSMIT 0xf
  94. #define DEFAULT_HALFDUP_COLL_WINDOW 0x37
  95. #define DEFAULT_TX_PAUSE_TIME 0xf000
  96. #define DEFAULT_RX_PREPEND 0
  97. #define DEFAULT_PREAMBLE_LEN 7
  98. #define DEFAULT_TX_PAUSE_TIME_EXTD 0
  99. #define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
  100. #define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
  101. #define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
  102. #define DEFAULT_BACK_TO_BACK_IPG 0x60
  103. #define DEFAULT_MAXIMUM_FRAME 0x600
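/* For reference: 0x600 is 1536 bytes of maximum frame length, 0x37 is a
 * 55-byte collision window, and 0xf000 is the default PTV[PT] pause time value.
 */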
  104. /* register related defines (bits, field offsets..) */
  105. #define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
  106. #define DTSEC_ECNTRL_GMIIM 0x00000040
  107. #define DTSEC_ECNTRL_TBIM 0x00000020
  108. #define DTSEC_ECNTRL_SGMIIM 0x00000002
  109. #define DTSEC_ECNTRL_RPM 0x00000010
  110. #define DTSEC_ECNTRL_R100M 0x00000008
  111. #define DTSEC_ECNTRL_QSGMIIM 0x00000001
  112. #define TCTRL_GTS 0x00000020
  113. #define RCTRL_PAL_MASK 0x001f0000
  114. #define RCTRL_PAL_SHIFT 16
  115. #define RCTRL_GHTX 0x00000400
  116. #define RCTRL_GRS 0x00000020
  117. #define RCTRL_MPROM 0x00000008
  118. #define RCTRL_RSF 0x00000004
  119. #define RCTRL_UPROM 0x00000001
  120. #define MACCFG1_SOFT_RESET 0x80000000
  121. #define MACCFG1_RX_FLOW 0x00000020
  122. #define MACCFG1_TX_FLOW 0x00000010
  123. #define MACCFG1_TX_EN 0x00000001
  124. #define MACCFG1_RX_EN 0x00000004
  125. #define MACCFG2_NIBBLE_MODE 0x00000100
  126. #define MACCFG2_BYTE_MODE 0x00000200
  127. #define MACCFG2_PAD_CRC_EN 0x00000004
  128. #define MACCFG2_FULL_DUPLEX 0x00000001
  129. #define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
  130. #define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
  131. #define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
  132. #define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
  133. #define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
  134. #define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
  135. #define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
  136. #define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
  137. #define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
  138. #define HAFDUP_EXCESS_DEFER 0x00010000
  139. #define HAFDUP_COLLISION_WINDOW 0x000003ff
  140. #define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
  141. #define HAFDUP_RETRANSMISSION_MAX 0x0000f000
  142. #define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
  143. #define PTV_PTE_MASK 0xffff0000
  144. #define PTV_PT_MASK 0x0000ffff
  145. #define PTV_PTE_SHIFT 16
  146. #define MAX_PACKET_ALIGNMENT 31
  147. #define MAX_INTER_PACKET_GAP 0x7f
  148. #define MAX_RETRANSMISSION 0x0f
  149. #define MAX_COLLISION_WINDOW 0x03ff
  150. /* Hash table size (32 bits*8 regs) */
  151. #define DTSEC_HASH_TABLE_SIZE 256
  152. /* Extended Hash table size (32 bits*16 regs) */
  153. #define EXTENDED_HASH_TABLE_SIZE 512
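/* With RCTRL[GHTX] clear, igaddr[] provides a 256-bin hash for individual
 * addresses and gaddr[] a separate 256-bin hash for group addresses; with
 * GHTX set, both register banks combine into a single 512-bin group hash
 * (see dtsec_add_hash_mac_address() below).
 */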
  154. /* dTSEC Memory Map registers */
  155. struct dtsec_regs {
  156. /* dTSEC General Control and Status Registers */
  157. u32 tsec_id; /* 0x000 ETSEC_ID register */
  158. u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
  159. u32 ievent; /* 0x008 Interrupt event register */
  160. u32 imask; /* 0x00C Interrupt mask register */
  161. u32 reserved0010[1];
  162. u32 ecntrl; /* 0x014 E control register */
  163. u32 ptv; /* 0x018 Pause time value register */
  164. u32 tbipa; /* 0x01C TBI PHY address register */
  165. u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
  166. u32 tmr_pevent; /* 0x024 Time-stamp event register */
  167. u32 tmr_pemask; /* 0x028 Timer event mask register */
  168. u32 reserved002c[5];
  169. u32 tctrl; /* 0x040 Transmit control register */
  170. u32 reserved0044[3];
  171. u32 rctrl; /* 0x050 Receive control register */
  172. u32 reserved0054[11];
  173. u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
  174. u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
  175. u32 reserved00c0[16];
  176. u32 maccfg1; /* 0x100 MAC configuration #1 */
  177. u32 maccfg2; /* 0x104 MAC configuration #2 */
  178. u32 ipgifg; /* 0x108 IPG/IFG */
  179. u32 hafdup; /* 0x10C Half-duplex */
  180. u32 maxfrm; /* 0x110 Maximum frame */
  181. u32 reserved0114[10];
  182. u32 ifstat; /* 0x13C Interface status */
  183. u32 macstnaddr1; /* 0x140 Station Address,part 1 */
  184. u32 macstnaddr2; /* 0x144 Station Address,part 2 */
  185. struct {
  186. u32 exact_match1; /* octets 1-4 */
  187. u32 exact_match2; /* octets 5-6 */
  188. } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
  189. u32 reserved01c0[16];
  190. u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
  191. u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
  192. u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
  193. u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
  194. u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
  195. u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
  196. u32 trmgv;
  197. /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
  198. u32 rbyt; /* 0x21C receive byte counter */
  199. u32 rpkt; /* 0x220 receive packet counter */
  200. u32 rfcs; /* 0x224 receive FCS error counter */
  201. u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
  202. u32 rbca; /* 0x22C Rx broadcast packet counter */
  203. u32 rxcf; /* 0x230 Rx control frame packet counter */
  204. u32 rxpf; /* 0x234 Rx pause frame packet counter */
  205. u32 rxuo; /* 0x238 Rx unknown OP code counter */
  206. u32 raln; /* 0x23C Rx alignment error counter */
  207. u32 rflr; /* 0x240 Rx frame length error counter */
  208. u32 rcde; /* 0x244 Rx code error counter */
  209. u32 rcse; /* 0x248 Rx carrier sense error counter */
  210. u32 rund; /* 0x24C Rx undersize packet counter */
  211. u32 rovr; /* 0x250 Rx oversize packet counter */
  212. u32 rfrg; /* 0x254 Rx fragments counter */
  213. u32 rjbr; /* 0x258 Rx jabber counter */
  214. u32 rdrp; /* 0x25C Rx drop */
  215. u32 tbyt; /* 0x260 Tx byte counter */
  216. u32 tpkt; /* 0x264 Tx packet counter */
  217. u32 tmca; /* 0x268 Tx multicast packet counter */
  218. u32 tbca; /* 0x26C Tx broadcast packet counter */
  219. u32 txpf; /* 0x270 Tx pause control frame counter */
  220. u32 tdfr; /* 0x274 Tx deferral packet counter */
  221. u32 tedf; /* 0x278 Tx excessive deferral packet counter */
  222. u32 tscl; /* 0x27C Tx single collision packet counter */
  223. u32 tmcl; /* 0x280 Tx multiple collision packet counter */
  224. u32 tlcl; /* 0x284 Tx late collision packet counter */
  225. u32 txcl; /* 0x288 Tx excessive collision packet counter */
  226. u32 tncl; /* 0x28C Tx total collision counter */
  227. u32 reserved0290[1];
  228. u32 tdrp; /* 0x294 Tx drop frame counter */
  229. u32 tjbr; /* 0x298 Tx jabber frame counter */
  230. u32 tfcs; /* 0x29C Tx FCS error counter */
  231. u32 txcf; /* 0x2A0 Tx control frame counter */
  232. u32 tovr; /* 0x2A4 Tx oversize frame counter */
  233. u32 tund; /* 0x2A8 Tx undersize frame counter */
  234. u32 tfrg; /* 0x2AC Tx fragments frame counter */
  235. u32 car1; /* 0x2B0 carry register one register* */
  236. u32 car2; /* 0x2B4 carry register two register* */
  237. u32 cam1; /* 0x2B8 carry register one mask register */
  238. u32 cam2; /* 0x2BC carry register two mask register */
  239. u32 reserved02c0[848];
  240. };
  241. /* struct dtsec_cfg - dTSEC configuration
  242. * Transmit half-duplex flow control, under software control for 10/100-Mbps
  243. * half-duplex media. If set, back pressure is applied to media by raising
  244. * carrier.
  245. * halfdup_retransmit:
  246. * Number of retransmission attempts following a collision.
  247. * If this is exceeded dTSEC aborts transmission due to excessive collisions.
  248. * The standard specifies the attempt limit to be 15.
  249. * halfdup_coll_window:
  250. * The number of bytes of the frame during which collisions may occur.
  251. * The default value of 55 corresponds to the frame byte at the end of the
  252. * standard 512-bit slot time window. If collisions are detected after this
  253. * byte, the late collision event is asserted and transmission of current
  254. * frame is aborted.
  255. * tx_pad_crc:
  256. * Pad and append CRC. If set, the MAC pads all transmitted short frames and
  257. * appends a CRC to every frame regardless of padding requirement.
  258. * tx_pause_time:
  259. * Transmit pause time value. This pause value is used as part of the pause
  260. * frame to be sent when a transmit pause frame is initiated.
  261. * If set to 0 this disables transmission of pause frames.
  262. * preamble_len:
  263. * Length, in bytes, of the preamble field preceding each Ethernet
  264. * start-of-frame delimiter byte. The default value of 0x7 should be used in
  265. * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
  266. * rx_prepend:
  267. * Packet alignment padding length. The specified number of bytes (1-31)
  268. * of zero padding are inserted before the start of each received frame.
  269. * For Ethernet, where optional preamble extraction is enabled, the padding
  270. * appears before the preamble, otherwise the padding precedes the
  271. * layer 2 header.
  272. *
  273. * This structure contains basic dTSEC configuration and must be passed to
  274. * init() function. A default set of configuration values can be
  275. * obtained by calling set_dflts().
  276. */
  277. struct dtsec_cfg {
  278. u16 halfdup_retransmit;
  279. u16 halfdup_coll_window;
  280. bool tx_pad_crc;
  281. u16 tx_pause_time;
  282. bool ptp_tsu_en;
  283. bool ptp_exception_en;
  284. u32 preamble_len;
  285. u32 rx_prepend;
  286. u16 tx_pause_time_extd;
  287. u16 maximum_frame;
  288. u32 non_back_to_back_ipg1;
  289. u32 non_back_to_back_ipg2;
  290. u32 min_ifg_enforcement;
  291. u32 back_to_back_ipg;
  292. };
  293. struct fman_mac {
  294. /* pointer to dTSEC memory mapped registers */
  295. struct dtsec_regs __iomem *regs;
  296. /* MAC address of device */
  297. u64 addr;
  298. /* Ethernet physical interface */
  299. phy_interface_t phy_if;
  300. u16 max_speed;
  301. void *dev_id; /* device cookie used by the exception cbs */
  302. fman_mac_exception_cb *exception_cb;
  303. fman_mac_exception_cb *event_cb;
  304. /* Number of individual addresses in registers for this station */
  305. u8 num_of_ind_addr_in_regs;
  306. /* pointer to driver's global address hash table */
  307. struct eth_hash_t *multicast_addr_hash;
  308. /* pointer to driver's individual address hash table */
  309. struct eth_hash_t *unicast_addr_hash;
  310. u8 mac_id;
  311. u32 exceptions;
  312. bool ptp_tsu_enabled;
  313. bool en_tsu_err_exception;
  314. struct dtsec_cfg *dtsec_drv_param;
  315. void *fm;
  316. struct fman_rev_info fm_rev_info;
  317. bool basex_if;
  318. struct phy_device *tbiphy;
  319. };
  320. static void set_dflts(struct dtsec_cfg *cfg)
  321. {
  322. cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
  323. cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
  324. cfg->tx_pad_crc = true;
  325. cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
  326. /* PHY address 0 is reserved (DPAA RM) */
  327. cfg->rx_prepend = DEFAULT_RX_PREPEND;
  328. cfg->ptp_tsu_en = true;
  329. cfg->ptp_exception_en = true;
  330. cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
  331. cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
  332. cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
  333. cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
  334. cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
  335. cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
  336. cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
  337. }
  338. static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
  339. phy_interface_t iface, u16 iface_speed, u8 *macaddr,
  340. u32 exception_mask, u8 tbi_addr)
  341. {
  342. bool is_rgmii, is_sgmii, is_qsgmii;
  343. int i;
  344. u32 tmp;
  345. /* Soft reset */
  346. iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
  347. iowrite32be(0, &regs->maccfg1);
  348. /* dtsec_id2 */
  349. tmp = ioread32be(&regs->tsec_id2);
  350. /* check RGMII support */
  351. if (iface == PHY_INTERFACE_MODE_RGMII ||
  352. iface == PHY_INTERFACE_MODE_RGMII_ID ||
  353. iface == PHY_INTERFACE_MODE_RGMII_RXID ||
  354. iface == PHY_INTERFACE_MODE_RGMII_TXID ||
  355. iface == PHY_INTERFACE_MODE_RMII)
  356. if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
  357. return -EINVAL;
  358. if (iface == PHY_INTERFACE_MODE_SGMII ||
  359. iface == PHY_INTERFACE_MODE_MII)
  360. if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
  361. return -EINVAL;
  362. is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
  363. iface == PHY_INTERFACE_MODE_RGMII_ID ||
  364. iface == PHY_INTERFACE_MODE_RGMII_RXID ||
  365. iface == PHY_INTERFACE_MODE_RGMII_TXID;
  366. is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
  367. is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
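/* Program ECNTRL: select the MAC-PHY interface (GMII/RGMII via GMIIM/RPM,
 * SGMII/QSGMII via the TBI/SerDes bits) and flag 100 Mbps operation with R100M.
 */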
  368. tmp = 0;
  369. if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
  370. tmp |= DTSEC_ECNTRL_GMIIM;
  371. if (is_sgmii)
  372. tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
  373. if (is_qsgmii)
  374. tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
  375. DTSEC_ECNTRL_QSGMIIM);
  376. if (is_rgmii)
  377. tmp |= DTSEC_ECNTRL_RPM;
  378. if (iface_speed == SPEED_100)
  379. tmp |= DTSEC_ECNTRL_R100M;
  380. iowrite32be(tmp, &regs->ecntrl);
  381. tmp = 0;
  382. if (cfg->tx_pause_time)
  383. tmp |= cfg->tx_pause_time;
  384. if (cfg->tx_pause_time_extd)
  385. tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
  386. iowrite32be(tmp, &regs->ptv);
  387. tmp = 0;
  388. tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
  389. /* Accept short frames */
  390. tmp |= RCTRL_RSF;
  391. iowrite32be(tmp, &regs->rctrl);
  392. /* Assign a Phy Address to the TBI (TBIPA).
  393. * Done also in cases where TBI is not selected to avoid conflict with
  394. * the external PHY's Physical address
  395. */
  396. iowrite32be(tbi_addr, &regs->tbipa);
  397. iowrite32be(0, &regs->tmr_ctrl);
  398. if (cfg->ptp_tsu_en) {
  399. tmp = 0;
  400. tmp |= TMR_PEVENT_TSRE;
  401. iowrite32be(tmp, &regs->tmr_pevent);
  402. if (cfg->ptp_exception_en) {
  403. tmp = 0;
  404. tmp |= TMR_PEMASK_TSREEN;
  405. iowrite32be(tmp, &regs->tmr_pemask);
  406. }
  407. }
  408. tmp = 0;
  409. tmp |= MACCFG1_RX_FLOW;
  410. tmp |= MACCFG1_TX_FLOW;
  411. iowrite32be(tmp, &regs->maccfg1);
  412. tmp = 0;
  413. if (iface_speed < SPEED_1000)
  414. tmp |= MACCFG2_NIBBLE_MODE;
  415. else if (iface_speed == SPEED_1000)
  416. tmp |= MACCFG2_BYTE_MODE;
  417. tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
  418. MACCFG2_PREAMBLE_LENGTH_MASK;
  419. if (cfg->tx_pad_crc)
  420. tmp |= MACCFG2_PAD_CRC_EN;
  421. /* Full Duplex */
  422. tmp |= MACCFG2_FULL_DUPLEX;
  423. iowrite32be(tmp, &regs->maccfg2);
  424. tmp = (((cfg->non_back_to_back_ipg1 <<
  425. IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
  426. & IPGIFG_NON_BACK_TO_BACK_IPG_1)
  427. | ((cfg->non_back_to_back_ipg2 <<
  428. IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
  429. & IPGIFG_NON_BACK_TO_BACK_IPG_2)
  430. | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
  431. & IPGIFG_MIN_IFG_ENFORCEMENT)
  432. | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
  433. iowrite32be(tmp, &regs->ipgifg);
  434. tmp = 0;
  435. tmp |= HAFDUP_EXCESS_DEFER;
  436. tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
  437. & HAFDUP_RETRANSMISSION_MAX);
  438. tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
  439. iowrite32be(tmp, &regs->hafdup);
  440. /* Initialize Maximum frame length */
  441. iowrite32be(cfg->maximum_frame, &regs->maxfrm);
  442. iowrite32be(0xffffffff, &regs->cam1);
  443. iowrite32be(0xffffffff, &regs->cam2);
  444. iowrite32be(exception_mask, &regs->imask);
  445. iowrite32be(0xffffffff, &regs->ievent);
  446. tmp = (u32)((macaddr[5] << 24) |
  447. (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
  448. iowrite32be(tmp, &regs->macstnaddr1);
  449. tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
  450. iowrite32be(tmp, &regs->macstnaddr2);
  451. /* HASH */
  452. for (i = 0; i < NUM_OF_HASH_REGS; i++) {
  453. /* Initialize IADDRx */
  454. iowrite32be(0, &regs->igaddr[i]);
  455. /* Initialize GADDRx */
  456. iowrite32be(0, &regs->gaddr[i]);
  457. }
  458. return 0;
  459. }
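/* MACSTNADDR1/2 hold the station address with the octet order reversed:
 * MACSTNADDR1 carries octets 6..3 and the upper half of MACSTNADDR2 carries
 * octets 2..1, which is why the bytes are packed "backwards" here and in init().
 */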
  460. static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
  461. {
  462. u32 tmp;
  463. tmp = (u32)((adr[5] << 24) |
  464. (adr[4] << 16) | (adr[3] << 8) | adr[2]);
  465. iowrite32be(tmp, &regs->macstnaddr1);
  466. tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
  467. iowrite32be(tmp, &regs->macstnaddr2);
  468. }
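/* Map a hash bucket onto its filter bit: buckets 0-255 select igaddr[0..7],
 * buckets 256-511 select gaddr[0..7]; within a register the low 5 bucket bits
 * index from the MSB downwards (bit_idx 0 -> 0x80000000).
 */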
  469. static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
  470. bool enable)
  471. {
  472. int reg_idx = (bucket >> 5) & 0xf;
  473. int bit_idx = bucket & 0x1f;
  474. u32 bit_mask = 0x80000000 >> bit_idx;
  475. u32 __iomem *reg;
  476. if (reg_idx > 7)
  477. reg = &regs->gaddr[reg_idx - 8];
  478. else
  479. reg = &regs->igaddr[reg_idx];
  480. if (enable)
  481. iowrite32be(ioread32be(reg) | bit_mask, reg);
  482. else
  483. iowrite32be(ioread32be(reg) & (~bit_mask), reg);
  484. }
  485. static int check_init_parameters(struct fman_mac *dtsec)
  486. {
  487. if (dtsec->max_speed >= SPEED_10000) {
  488. pr_err("1G MAC driver supports 1G or lower speeds\n");
  489. return -EINVAL;
  490. }
  491. if (dtsec->addr == 0) {
  492. pr_err("Ethernet MAC must have a valid MAC address\n");
  493. return -EINVAL;
  494. }
  495. if ((dtsec->dtsec_drv_param)->rx_prepend >
  496. MAX_PACKET_ALIGNMENT) {
  497. pr_err("packetAlignmentPadding can't be greater than %d\n",
  498. MAX_PACKET_ALIGNMENT);
  499. return -EINVAL;
  500. }
  501. if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
  502. MAX_INTER_PACKET_GAP) ||
  503. ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
  504. MAX_INTER_PACKET_GAP) ||
  505. ((dtsec->dtsec_drv_param)->back_to_back_ipg >
  506. MAX_INTER_PACKET_GAP)) {
  507. pr_err("Inter packet gap can't be greater than %d\n",
  508. MAX_INTER_PACKET_GAP);
  509. return -EINVAL;
  510. }
  511. if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
  512. MAX_RETRANSMISSION) {
  513. pr_err("maxRetransmission can't be greater than %d\n",
  514. MAX_RETRANSMISSION);
  515. return -EINVAL;
  516. }
  517. if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
  518. MAX_COLLISION_WINDOW) {
  519. pr_err("collisionWindow can't be greater than %d\n",
  520. MAX_COLLISION_WINDOW);
  521. return -EINVAL;
  522. /* If Auto negotiation process is disabled, need to set up the PHY
  523. * using the MII Management Interface
  524. */
  525. }
  526. if (!dtsec->exception_cb) {
  527. pr_err("uninitialized exception_cb\n");
  528. return -EINVAL;
  529. }
  530. if (!dtsec->event_cb) {
  531. pr_err("uninitialized event_cb\n");
  532. return -EINVAL;
  533. }
  534. return 0;
  535. }
  536. static int get_exception_flag(enum fman_mac_exceptions exception)
  537. {
  538. u32 bit_mask;
  539. switch (exception) {
  540. case FM_MAC_EX_1G_BAB_RX:
  541. bit_mask = DTSEC_IMASK_BREN;
  542. break;
  543. case FM_MAC_EX_1G_RX_CTL:
  544. bit_mask = DTSEC_IMASK_RXCEN;
  545. break;
  546. case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
  547. bit_mask = DTSEC_IMASK_GTSCEN;
  548. break;
  549. case FM_MAC_EX_1G_BAB_TX:
  550. bit_mask = DTSEC_IMASK_BTEN;
  551. break;
  552. case FM_MAC_EX_1G_TX_CTL:
  553. bit_mask = DTSEC_IMASK_TXCEN;
  554. break;
  555. case FM_MAC_EX_1G_TX_ERR:
  556. bit_mask = DTSEC_IMASK_TXEEN;
  557. break;
  558. case FM_MAC_EX_1G_LATE_COL:
  559. bit_mask = DTSEC_IMASK_LCEN;
  560. break;
  561. case FM_MAC_EX_1G_COL_RET_LMT:
  562. bit_mask = DTSEC_IMASK_CRLEN;
  563. break;
  564. case FM_MAC_EX_1G_TX_FIFO_UNDRN:
  565. bit_mask = DTSEC_IMASK_XFUNEN;
  566. break;
  567. case FM_MAC_EX_1G_MAG_PCKT:
  568. bit_mask = DTSEC_IMASK_MAGEN;
  569. break;
  570. case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
  571. bit_mask = DTSEC_IMASK_MMRDEN;
  572. break;
  573. case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
  574. bit_mask = DTSEC_IMASK_MMWREN;
  575. break;
  576. case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
  577. bit_mask = DTSEC_IMASK_GRSCEN;
  578. break;
  579. case FM_MAC_EX_1G_DATA_ERR:
  580. bit_mask = DTSEC_IMASK_TDPEEN;
  581. break;
  582. case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
  583. bit_mask = DTSEC_IMASK_MSROEN;
  584. break;
  585. default:
  586. bit_mask = 0;
  587. break;
  588. }
  589. return bit_mask;
  590. }
  591. static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
  592. {
  593. /* dtsec_drv_param is freed and set to NULL at the end of dtsec_init(), so a NULL config pointer means the MAC has been initialized */
  594. if (!dtsec_drv_params)
  595. return true;
  596. return false;
  597. }
  598. static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
  599. {
  600. struct dtsec_regs __iomem *regs = dtsec->regs;
  601. if (!is_init_done(dtsec->dtsec_drv_param))
  602. return 0;
  603. return (u16)ioread32be(&regs->maxfrm);
  604. }
  605. static void dtsec_isr(void *handle)
  606. {
  607. struct fman_mac *dtsec = (struct fman_mac *)handle;
  608. struct dtsec_regs __iomem *regs = dtsec->regs;
  609. u32 event;
  610. /* do not handle MDIO events */
  611. event = ioread32be(&regs->ievent) &
  612. (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
  613. event &= ioread32be(&regs->imask);
  614. iowrite32be(event, &regs->ievent);
  615. if (event & DTSEC_IMASK_BREN)
  616. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
  617. if (event & DTSEC_IMASK_RXCEN)
  618. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
  619. if (event & DTSEC_IMASK_GTSCEN)
  620. dtsec->exception_cb(dtsec->dev_id,
  621. FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
  622. if (event & DTSEC_IMASK_BTEN)
  623. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
  624. if (event & DTSEC_IMASK_TXCEN)
  625. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
  626. if (event & DTSEC_IMASK_TXEEN)
  627. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
  628. if (event & DTSEC_IMASK_LCEN)
  629. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
  630. if (event & DTSEC_IMASK_CRLEN)
  631. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
  632. if (event & DTSEC_IMASK_XFUNEN) {
  633. /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
  634. if (dtsec->fm_rev_info.major == 2) {
  635. u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
  636. /* a. Write 0x00E0_0C00 to DTSEC_ID
  637. * This is a read only register
  638. * b. Read and save the value of TPKT
  639. */
  640. tpkt1 = ioread32be(&regs->tpkt);
  641. /* c. Read the register at dTSEC address offset 0x32C */
  642. tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
  643. /* d. Compare bits [9:15] to bits [25:31] of the
  644. * register at address offset 0x32C.
  645. */
  646. if ((tmp_reg1 & 0x007F0000) !=
  647. (tmp_reg1 & 0x0000007F)) {
  648. /* If they are not equal, save the value of
  649. * this register and wait for at least
  650. * MAXFRM*16 ns
  651. */
  652. usleep_range((u32)(min
  653. (dtsec_get_max_frame_length(dtsec) *
  654. 16 / 1000, 1)), (u32)
  655. (min(dtsec_get_max_frame_length
  656. (dtsec) * 16 / 1000, 1) + 1));
  657. }
  658. /* e. Read and save TPKT again and read the register
  659. * at dTSEC address offset 0x32C again
  660. */
  661. tpkt2 = ioread32be(&regs->tpkt);
  662. tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
  663. /* f. Compare the value of TPKT saved in step b to
  664. * value read in step e. Also compare bits [9:15] of
  665. * the register at offset 0x32C saved in step d to the
  666. * value of bits [9:15] saved in step e. If the two
  667. * registers values are unchanged, then the transmit
  668. * portion of the dTSEC controller is locked up and
  669. * the user should proceed to the recover sequence.
  670. */
  671. if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
  672. (tmp_reg2 & 0x007F0000))) {
  673. /* recover sequence */
  674. /* a.Write a 1 to RCTRL[GRS] */
  675. iowrite32be(ioread32be(&regs->rctrl) |
  676. RCTRL_GRS, &regs->rctrl);
  677. /* b.Wait until IEVENT[GRSC]=1, or at least
  678. * 100 us has elapsed.
  679. */
  680. for (i = 0; i < 100; i++) {
  681. if (ioread32be(&regs->ievent) &
  682. DTSEC_IMASK_GRSCEN)
  683. break;
  684. udelay(1);
  685. }
  686. if (ioread32be(&regs->ievent) &
  687. DTSEC_IMASK_GRSCEN)
  688. iowrite32be(DTSEC_IMASK_GRSCEN,
  689. &regs->ievent);
  690. else
  691. pr_debug("Rx lockup due to Tx lockup\n");
  692. /* c.Write a 1 to bit n of FM_RSTC
  693. * (offset 0x0CC of FPM)
  694. */
  695. fman_reset_mac(dtsec->fm, dtsec->mac_id);
  696. /* d.Wait 4 Tx clocks (32 ns) */
  697. udelay(1);
  698. /* e.Write a 0 to bit n of FM_RSTC. */
  699. /* cleared by FMAN
  700. */
  701. }
  702. }
  703. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
  704. }
  705. if (event & DTSEC_IMASK_MAGEN)
  706. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
  707. if (event & DTSEC_IMASK_GRSCEN)
  708. dtsec->exception_cb(dtsec->dev_id,
  709. FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
  710. if (event & DTSEC_IMASK_TDPEEN)
  711. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
  712. if (event & DTSEC_IMASK_RDPEEN)
  713. dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
  714. /* masked interrupts */
  715. WARN_ON(event & DTSEC_IMASK_ABRTEN);
  716. WARN_ON(event & DTSEC_IMASK_IFERREN);
  717. }
  718. static void dtsec_1588_isr(void *handle)
  719. {
  720. struct fman_mac *dtsec = (struct fman_mac *)handle;
  721. struct dtsec_regs __iomem *regs = dtsec->regs;
  722. u32 event;
  723. if (dtsec->ptp_tsu_enabled) {
  724. event = ioread32be(&regs->tmr_pevent);
  725. event &= ioread32be(&regs->tmr_pemask);
  726. if (event) {
  727. iowrite32be(event, &regs->tmr_pevent);
  728. WARN_ON(event & TMR_PEVENT_TSRE);
  729. dtsec->exception_cb(dtsec->dev_id,
  730. FM_MAC_EX_1G_1588_TS_RX_ERR);
  731. }
  732. }
  733. }
  734. static void free_init_resources(struct fman_mac *dtsec)
  735. {
  736. fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  737. FMAN_INTR_TYPE_ERR);
  738. fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  739. FMAN_INTR_TYPE_NORMAL);
  740. /* release the driver's group hash table */
  741. free_hash_table(dtsec->multicast_addr_hash);
  742. dtsec->multicast_addr_hash = NULL;
  743. /* release the driver's individual hash table */
  744. free_hash_table(dtsec->unicast_addr_hash);
  745. dtsec->unicast_addr_hash = NULL;
  746. }
  747. int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
  748. {
  749. if (is_init_done(dtsec->dtsec_drv_param))
  750. return -EINVAL;
  751. dtsec->dtsec_drv_param->maximum_frame = new_val;
  752. return 0;
  753. }
  754. int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
  755. {
  756. if (is_init_done(dtsec->dtsec_drv_param))
  757. return -EINVAL;
  758. dtsec->dtsec_drv_param->tx_pad_crc = new_val;
  759. return 0;
  760. }
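/* graceful_start()/graceful_stop() implement the pattern used throughout this
 * file: assert RCTRL[GRS]/TCTRL[GTS] (with the errata-specific delays below)
 * before touching MAC configuration, then clear them to resume traffic.
 */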
  761. static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
  762. {
  763. struct dtsec_regs __iomem *regs = dtsec->regs;
  764. if (mode & COMM_MODE_TX)
  765. iowrite32be(ioread32be(&regs->tctrl) &
  766. ~TCTRL_GTS, &regs->tctrl);
  767. if (mode & COMM_MODE_RX)
  768. iowrite32be(ioread32be(&regs->rctrl) &
  769. ~RCTRL_GRS, &regs->rctrl);
  770. }
  771. static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
  772. {
  773. struct dtsec_regs __iomem *regs = dtsec->regs;
  774. u32 tmp;
  775. /* Graceful stop - Assert the graceful Rx stop bit */
  776. if (mode & COMM_MODE_RX) {
  777. tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
  778. iowrite32be(tmp, &regs->rctrl);
  779. if (dtsec->fm_rev_info.major == 2) {
  780. /* Workaround for dTSEC Errata A002 */
  781. usleep_range(100, 200);
  782. } else {
  783. /* Workaround for dTSEC Errata A004839 */
  784. usleep_range(10, 50);
  785. }
  786. }
  787. /* Graceful stop - Assert the graceful Tx stop bit */
  788. if (mode & COMM_MODE_TX) {
  789. if (dtsec->fm_rev_info.major == 2) {
  790. /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
  791. pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
  792. } else {
  793. tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
  794. iowrite32be(tmp, &regs->tctrl);
  795. /* Workaround for dTSEC Errata A0012, A0014 */
  796. usleep_range(10, 50);
  797. }
  798. }
  799. }
  800. int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
  801. {
  802. struct dtsec_regs __iomem *regs = dtsec->regs;
  803. u32 tmp;
  804. if (!is_init_done(dtsec->dtsec_drv_param))
  805. return -EINVAL;
  806. /* Enable */
  807. tmp = ioread32be(&regs->maccfg1);
  808. if (mode & COMM_MODE_RX)
  809. tmp |= MACCFG1_RX_EN;
  810. if (mode & COMM_MODE_TX)
  811. tmp |= MACCFG1_TX_EN;
  812. iowrite32be(tmp, &regs->maccfg1);
  813. /* Graceful start - clear the graceful Rx/Tx stop bit */
  814. graceful_start(dtsec, mode);
  815. return 0;
  816. }
  817. int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
  818. {
  819. struct dtsec_regs __iomem *regs = dtsec->regs;
  820. u32 tmp;
  821. if (!is_init_done(dtsec->dtsec_drv_param))
  822. return -EINVAL;
  823. /* Graceful stop - Assert the graceful Rx/Tx stop bit */
  824. graceful_stop(dtsec, mode);
  825. tmp = ioread32be(&regs->maccfg1);
  826. if (mode & COMM_MODE_RX)
  827. tmp &= ~MACCFG1_RX_EN;
  828. if (mode & COMM_MODE_TX)
  829. tmp &= ~MACCFG1_TX_EN;
  830. iowrite32be(tmp, &regs->maccfg1);
  831. return 0;
  832. }
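/* PTV layout: PT (pause time) occupies the low 16 bits and PTE (extended pause
 * time) the high 16 bits. The value programmed here is carried in transmitted
 * PAUSE frames; per the usual 802.3 convention it is expressed in pause quanta
 * of 512 bit times.
 */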
  833. int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
  834. u8 __maybe_unused priority,
  835. u16 pause_time, u16 __maybe_unused thresh_time)
  836. {
  837. struct dtsec_regs __iomem *regs = dtsec->regs;
  838. enum comm_mode mode = COMM_MODE_NONE;
  839. u32 ptv = 0;
  840. if (!is_init_done(dtsec->dtsec_drv_param))
  841. return -EINVAL;
  842. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  843. mode |= COMM_MODE_RX;
  844. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  845. mode |= COMM_MODE_TX;
  846. graceful_stop(dtsec, mode);
  847. if (pause_time) {
  848. /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
  849. if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
  850. pr_warn("pause-time: %d illegal. Should be > 320\n",
  851. pause_time);
  852. return -EINVAL;
  853. }
  854. ptv = ioread32be(&regs->ptv);
  855. ptv &= PTV_PTE_MASK;
  856. ptv |= pause_time & PTV_PT_MASK;
  857. iowrite32be(ptv, &regs->ptv);
  858. /* trigger the transmission of a flow-control pause frame */
  859. iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
  860. &regs->maccfg1);
  861. } else
  862. iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
  863. &regs->maccfg1);
  864. graceful_start(dtsec, mode);
  865. return 0;
  866. }
  867. int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
  868. {
  869. struct dtsec_regs __iomem *regs = dtsec->regs;
  870. enum comm_mode mode = COMM_MODE_NONE;
  871. u32 tmp;
  872. if (!is_init_done(dtsec->dtsec_drv_param))
  873. return -EINVAL;
  874. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  875. mode |= COMM_MODE_RX;
  876. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  877. mode |= COMM_MODE_TX;
  878. graceful_stop(dtsec, mode);
  879. tmp = ioread32be(&regs->maccfg1);
  880. if (en)
  881. tmp |= MACCFG1_RX_FLOW;
  882. else
  883. tmp &= ~MACCFG1_RX_FLOW;
  884. iowrite32be(tmp, &regs->maccfg1);
  885. graceful_start(dtsec, mode);
  886. return 0;
  887. }
  888. int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
  889. {
  890. struct dtsec_regs __iomem *regs = dtsec->regs;
  891. enum comm_mode mode = COMM_MODE_NONE;
  892. if (!is_init_done(dtsec->dtsec_drv_param))
  893. return -EINVAL;
  894. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  895. mode |= COMM_MODE_RX;
  896. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  897. mode |= COMM_MODE_TX;
  898. graceful_stop(dtsec, mode);
  899. /* Initialize MAC Station Address registers (1 & 2)
  900. * Station address has to be swapped (big endian to little endian)
  901. */
  902. dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
  903. set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
  904. graceful_start(dtsec, mode);
  905. return 0;
  906. }
  907. int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  908. {
  909. struct dtsec_regs __iomem *regs = dtsec->regs;
  910. struct eth_hash_entry *hash_entry;
  911. u64 addr;
  912. s32 bucket;
  913. u32 crc = 0xFFFFFFFF;
  914. bool mcast, ghtx;
  915. if (!is_init_done(dtsec->dtsec_drv_param))
  916. return -EINVAL;
  917. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  918. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  919. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  920. /* Cannot handle unicast mac addr when GHTX is on */
  921. if (ghtx && !mcast) {
  922. pr_err("Could not compute hash bucket\n");
  923. return -EINVAL;
  924. }
  925. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  926. crc = bitrev32(crc);
  927. /* Considering the 9 highest order bits of the CRC, H[8:0]:
  928. * if ghtx = 0, H[8:6] (highest order 3 bits) identify the hash register
  929. * and H[5:1] (next 5 bits) identify the hash bit
  930. * if ghtx = 1, H[8:5] (highest order 4 bits) identify the hash register
  931. * and H[4:0] (next 5 bits) identify the hash bit.
  932. *
  933. * In the resulting bucket index the low 5 bits identify the bit within
  934. * the hash register, while the higher 4 bits identify the hash register
  935. */
  936. if (ghtx) {
  937. bucket = (s32)((crc >> 23) & 0x1ff);
  938. } else {
  939. bucket = (s32)((crc >> 24) & 0xff);
  940. /* if !ghtx and mcast the bit must be set in gaddr instead of
  941. *igaddr.
  942. */
  943. if (mcast)
  944. bucket += 0x100;
  945. }
  946. set_bucket(dtsec->regs, bucket, true);
  947. /* Create element to be added to the driver hash table */
  948. hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
  949. if (!hash_entry)
  950. return -ENOMEM;
  951. hash_entry->addr = addr;
  952. INIT_LIST_HEAD(&hash_entry->node);
  953. if (addr & MAC_GROUP_ADDRESS)
  954. /* Group Address */
  955. list_add_tail(&hash_entry->node,
  956. &dtsec->multicast_addr_hash->lsts[bucket]);
  957. else
  958. list_add_tail(&hash_entry->node,
  959. &dtsec->unicast_addr_hash->lsts[bucket]);
  960. return 0;
  961. }
  962. int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
  963. {
  964. u32 tmp;
  965. struct dtsec_regs __iomem *regs = dtsec->regs;
  966. if (!is_init_done(dtsec->dtsec_drv_param))
  967. return -EINVAL;
  968. tmp = ioread32be(&regs->rctrl);
  969. if (enable)
  970. tmp |= RCTRL_MPROM;
  971. else
  972. tmp &= ~RCTRL_MPROM;
  973. iowrite32be(tmp, &regs->rctrl);
  974. return 0;
  975. }
  976. int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  977. {
  978. struct dtsec_regs __iomem *regs = dtsec->regs;
  979. struct list_head *pos;
  980. struct eth_hash_entry *hash_entry = NULL;
  981. u64 addr;
  982. s32 bucket;
  983. u32 crc = 0xFFFFFFFF;
  984. bool mcast, ghtx;
  985. if (!is_init_done(dtsec->dtsec_drv_param))
  986. return -EINVAL;
  987. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  988. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  989. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  990. /* Cannot handle unicast mac addr when GHTX is on */
  991. if (ghtx && !mcast) {
  992. pr_err("Could not compute hash bucket\n");
  993. return -EINVAL;
  994. }
  995. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  996. crc = bitrev32(crc);
  997. if (ghtx) {
  998. bucket = (s32)((crc >> 23) & 0x1ff);
  999. } else {
  1000. bucket = (s32)((crc >> 24) & 0xff);
  1001. /* if !ghtx and mcast the bit must be set
  1002. * in gaddr instead of igaddr.
  1003. */
  1004. if (mcast)
  1005. bucket += 0x100;
  1006. }
  1007. if (addr & MAC_GROUP_ADDRESS) {
  1008. /* Group Address */
  1009. list_for_each(pos,
  1010. &dtsec->multicast_addr_hash->lsts[bucket]) {
  1011. hash_entry = ETH_HASH_ENTRY_OBJ(pos);
  1012. if (hash_entry->addr == addr) {
  1013. list_del_init(&hash_entry->node);
  1014. kfree(hash_entry);
  1015. break;
  1016. }
  1017. }
  1018. if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
  1019. set_bucket(dtsec->regs, bucket, false);
  1020. } else {
  1021. /* Individual Address */
  1022. list_for_each(pos,
  1023. &dtsec->unicast_addr_hash->lsts[bucket]) {
  1024. hash_entry = ETH_HASH_ENTRY_OBJ(pos);
  1025. if (hash_entry->addr == addr) {
  1026. list_del_init(&hash_entry->node);
  1027. kfree(hash_entry);
  1028. break;
  1029. }
  1030. }
  1031. if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
  1032. set_bucket(dtsec->regs, bucket, false);
  1033. }
  1034. /* address does not exist */
  1035. WARN_ON(!hash_entry);
  1036. return 0;
  1037. }
  1038. int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
  1039. {
  1040. struct dtsec_regs __iomem *regs = dtsec->regs;
  1041. u32 tmp;
  1042. if (!is_init_done(dtsec->dtsec_drv_param))
  1043. return -EINVAL;
  1044. /* Set unicast promiscuous */
  1045. tmp = ioread32be(&regs->rctrl);
  1046. if (new_val)
  1047. tmp |= RCTRL_UPROM;
  1048. else
  1049. tmp &= ~RCTRL_UPROM;
  1050. iowrite32be(tmp, &regs->rctrl);
  1051. /* Set multicast promiscuous */
  1052. tmp = ioread32be(&regs->rctrl);
  1053. if (new_val)
  1054. tmp |= RCTRL_MPROM;
  1055. else
  1056. tmp &= ~RCTRL_MPROM;
  1057. iowrite32be(tmp, &regs->rctrl);
  1058. return 0;
  1059. }
  1060. int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
  1061. {
  1062. struct dtsec_regs __iomem *regs = dtsec->regs;
  1063. enum comm_mode mode = COMM_MODE_NONE;
  1064. u32 tmp;
  1065. if (!is_init_done(dtsec->dtsec_drv_param))
  1066. return -EINVAL;
  1067. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  1068. mode |= COMM_MODE_RX;
  1069. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  1070. mode |= COMM_MODE_TX;
  1071. graceful_stop(dtsec, mode);
  1072. tmp = ioread32be(&regs->maccfg2);
  1073. /* Full Duplex */
  1074. tmp |= MACCFG2_FULL_DUPLEX;
  1075. tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
  1076. if (speed < SPEED_1000)
  1077. tmp |= MACCFG2_NIBBLE_MODE;
  1078. else if (speed == SPEED_1000)
  1079. tmp |= MACCFG2_BYTE_MODE;
  1080. iowrite32be(tmp, &regs->maccfg2);
  1081. tmp = ioread32be(&regs->ecntrl);
  1082. if (speed == SPEED_100)
  1083. tmp |= DTSEC_ECNTRL_R100M;
  1084. else
  1085. tmp &= ~DTSEC_ECNTRL_R100M;
  1086. iowrite32be(tmp, &regs->ecntrl);
  1087. graceful_start(dtsec, mode);
  1088. return 0;
  1089. }
  1090. int dtsec_restart_autoneg(struct fman_mac *dtsec)
  1091. {
  1092. u16 tmp_reg16;
  1093. if (!is_init_done(dtsec->dtsec_drv_param))
  1094. return -EINVAL;
  1095. tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
  1096. tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
  1097. tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
  1098. BMCR_FULLDPLX | BMCR_SPEED1000);
  1099. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1100. return 0;
  1101. }
  1102. int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
  1103. {
  1104. struct dtsec_regs __iomem *regs = dtsec->regs;
  1105. if (!is_init_done(dtsec->dtsec_drv_param))
  1106. return -EINVAL;
  1107. *mac_version = ioread32be(&regs->tsec_id);
  1108. return 0;
  1109. }
  1110. int dtsec_set_exception(struct fman_mac *dtsec,
  1111. enum fman_mac_exceptions exception, bool enable)
  1112. {
  1113. struct dtsec_regs __iomem *regs = dtsec->regs;
  1114. u32 bit_mask = 0;
  1115. if (!is_init_done(dtsec->dtsec_drv_param))
  1116. return -EINVAL;
  1117. if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
  1118. bit_mask = get_exception_flag(exception);
  1119. if (bit_mask) {
  1120. if (enable)
  1121. dtsec->exceptions |= bit_mask;
  1122. else
  1123. dtsec->exceptions &= ~bit_mask;
  1124. } else {
  1125. pr_err("Undefined exception\n");
  1126. return -EINVAL;
  1127. }
  1128. if (enable)
  1129. iowrite32be(ioread32be(&regs->imask) | bit_mask,
  1130. &regs->imask);
  1131. else
  1132. iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
  1133. &regs->imask);
  1134. } else {
  1135. if (!dtsec->ptp_tsu_enabled) {
  1136. pr_err("Exception valid for 1588 only\n");
  1137. return -EINVAL;
  1138. }
  1139. switch (exception) {
  1140. case FM_MAC_EX_1G_1588_TS_RX_ERR:
  1141. if (enable) {
  1142. dtsec->en_tsu_err_exception = true;
  1143. iowrite32be(ioread32be(&regs->tmr_pemask) |
  1144. TMR_PEMASK_TSREEN,
  1145. &regs->tmr_pemask);
  1146. } else {
  1147. dtsec->en_tsu_err_exception = false;
  1148. iowrite32be(ioread32be(&regs->tmr_pemask) &
  1149. ~TMR_PEMASK_TSREEN,
  1150. &regs->tmr_pemask);
  1151. }
  1152. break;
  1153. default:
  1154. pr_err("Undefined exception\n");
  1155. return -EINVAL;
  1156. }
  1157. }
  1158. return 0;
  1159. }
  1160. int dtsec_init(struct fman_mac *dtsec)
  1161. {
  1162. struct dtsec_regs __iomem *regs = dtsec->regs;
  1163. struct dtsec_cfg *dtsec_drv_param;
  1164. int err;
  1165. u16 max_frm_ln;
  1166. enet_addr_t eth_addr;
  1167. if (is_init_done(dtsec->dtsec_drv_param))
  1168. return -EINVAL;
  1169. if (DEFAULT_RESET_ON_INIT &&
  1170. (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
  1171. pr_err("Can't reset MAC!\n");
  1172. return -EINVAL;
  1173. }
  1174. err = check_init_parameters(dtsec);
  1175. if (err)
  1176. return err;
  1177. dtsec_drv_param = dtsec->dtsec_drv_param;
  1178. MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
  1179. err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
  1180. dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
  1181. dtsec->tbiphy->mdio.addr);
  1182. if (err) {
  1183. free_init_resources(dtsec);
  1184. pr_err("DTSEC version doesn't support this i/f mode\n");
  1185. return err;
  1186. }
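/* For SGMII, bring up the on-chip TBI PCS through its MDIO registers: pulse
 * TBICON[SOFT_RESET], advertise either 1000Base-X or SGMII depending on
 * basex_if, and restart auto-negotiation at 1000 Mb/s full duplex.
 */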
  1187. if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
  1188. u16 tmp_reg16;
  1189. /* Configure the TBI PHY Control Register */
  1190. tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
  1191. phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
  1192. tmp_reg16 = TBICON_CLK_SELECT;
  1193. phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
  1194. tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
  1195. BMCR_FULLDPLX | BMCR_SPEED1000);
  1196. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1197. if (dtsec->basex_if)
  1198. tmp_reg16 = TBIANA_1000X;
  1199. else
  1200. tmp_reg16 = TBIANA_SGMII;
  1201. phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
  1202. tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
  1203. BMCR_FULLDPLX | BMCR_SPEED1000);
  1204. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1205. }
  1206. /* Max Frame Length */
  1207. max_frm_ln = (u16)ioread32be(&regs->maxfrm);
  1208. err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
  1209. if (err) {
  1210. pr_err("Setting max frame length failed\n");
  1211. free_init_resources(dtsec);
  1212. return -EINVAL;
  1213. }
  1214. dtsec->multicast_addr_hash =
  1215. alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
  1216. if (!dtsec->multicast_addr_hash) {
  1217. free_init_resources(dtsec);
  1218. pr_err("MC hash table allocation failed\n");
  1219. return -ENOMEM;
  1220. }
  1221. dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
  1222. if (!dtsec->unicast_addr_hash) {
  1223. free_init_resources(dtsec);
  1224. pr_err("UC hash table allocation failed\n");
  1225. return -ENOMEM;
  1226. }
  1227. /* register err intr handler for dtsec to FPM (err) */
  1228. fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  1229. FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
  1230. /* register 1588 intr handler for TMR to FPM (normal) */
  1231. fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  1232. FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
  1233. kfree(dtsec_drv_param);
  1234. dtsec->dtsec_drv_param = NULL;
  1235. return 0;
  1236. }
  1237. int dtsec_free(struct fman_mac *dtsec)
  1238. {
  1239. free_init_resources(dtsec);
  1240. kfree(dtsec->dtsec_drv_param);
  1241. dtsec->dtsec_drv_param = NULL;
  1242. kfree(dtsec);
  1243. return 0;
  1244. }
  1245. struct fman_mac *dtsec_config(struct fman_mac_params *params)
  1246. {
  1247. struct fman_mac *dtsec;
  1248. struct dtsec_cfg *dtsec_drv_param;
  1249. void __iomem *base_addr;
  1250. base_addr = params->base_addr;
  1251. /* allocate memory for the dTSEC MAC data structure */
  1252. dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
  1253. if (!dtsec)
  1254. return NULL;
  1255. /* allocate memory for the dTSEC driver parameters data structure */
  1256. dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
  1257. if (!dtsec_drv_param)
  1258. goto err_dtsec;
  1259. /* Plant parameter structure pointer */
  1260. dtsec->dtsec_drv_param = dtsec_drv_param;
  1261. set_dflts(dtsec_drv_param);
  1262. dtsec->regs = base_addr;
  1263. dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
  1264. dtsec->max_speed = params->max_speed;
  1265. dtsec->phy_if = params->phy_if;
  1266. dtsec->mac_id = params->mac_id;
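/* Default exception set; this mirrors DTSEC_EVENTS_MASK defined above. */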
  1267. dtsec->exceptions = (DTSEC_IMASK_BREN |
  1268. DTSEC_IMASK_RXCEN |
  1269. DTSEC_IMASK_BTEN |
  1270. DTSEC_IMASK_TXCEN |
  1271. DTSEC_IMASK_TXEEN |
  1272. DTSEC_IMASK_ABRTEN |
  1273. DTSEC_IMASK_LCEN |
  1274. DTSEC_IMASK_CRLEN |
  1275. DTSEC_IMASK_XFUNEN |
  1276. DTSEC_IMASK_IFERREN |
  1277. DTSEC_IMASK_MAGEN |
  1278. DTSEC_IMASK_TDPEEN |
  1279. DTSEC_IMASK_RDPEEN);
  1280. dtsec->exception_cb = params->exception_cb;
  1281. dtsec->event_cb = params->event_cb;
  1282. dtsec->dev_id = params->dev_id;
  1283. dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
  1284. dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
  1285. dtsec->fm = params->fm;
  1286. dtsec->basex_if = params->basex_if;
  1287. if (!params->internal_phy_node) {
  1288. pr_err("TBI PHY node is not available\n");
  1289. goto err_dtsec_drv_param;
  1290. }
  1291. dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
  1292. if (!dtsec->tbiphy) {
  1293. pr_err("of_phy_find_device (TBI PHY) failed\n");
  1294. goto err_dtsec_drv_param;
  1295. }
  1296. put_device(&dtsec->tbiphy->mdio.dev);
  1297. /* Save FMan revision */
  1298. fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
  1299. return dtsec;
  1300. err_dtsec_drv_param:
  1301. kfree(dtsec_drv_param);
  1302. err_dtsec:
  1303. kfree(dtsec);
  1304. return NULL;
  1305. }
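/* Typical call order, as implied by the is_init_done() checks in this file
 * (the exact sequence is driven by the FMan MAC layer, not shown here):
 * dtsec_config() allocates the fman_mac and its default dtsec_cfg, optional
 * dtsec_cfg_*() calls tweak that config, dtsec_init() programs the hardware
 * and frees the config (after which is_init_done() returns true), and
 * dtsec_enable()/dtsec_disable() then gate Rx/Tx at runtime; dtsec_free()
 * releases everything.
 */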