fman_dtsec.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451
  1. /*
  2. * Copyright 2008-2015 Freescale Semiconductor Inc.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions are met:
  6. * * Redistributions of source code must retain the above copyright
  7. * notice, this list of conditions and the following disclaimer.
  8. * * Redistributions in binary form must reproduce the above copyright
  9. * notice, this list of conditions and the following disclaimer in the
  10. * documentation and/or other materials provided with the distribution.
  11. * * Neither the name of Freescale Semiconductor nor the
  12. * names of its contributors may be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. *
  16. * ALTERNATIVELY, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") as published by the Free Software
  18. * Foundation, either version 2 of that License or (at your option) any
  19. * later version.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  22. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  23. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  24. * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  25. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  26. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  29. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  30. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33. #include "fman_dtsec.h"
  34. #include "fman.h"
  35. #include <linux/slab.h>
  36. #include <linux/bitrev.h>
  37. #include <linux/io.h>
  38. #include <linux/delay.h>
  39. #include <linux/phy.h>
  40. #include <linux/crc32.h>
  41. #include <linux/of_mdio.h>
  42. #include <linux/mii.h>
/* TBI register addresses */
#define MII_TBICON			0x11

/* TBICON register bit fields */
#define TBICON_SOFT_RESET		0x8000	/* Soft reset */
#define TBICON_DISABLE_RX_DIS		0x2000	/* Disable receive disparity */
#define TBICON_DISABLE_TX_DIS		0x1000	/* Disable transmit disparity */
#define TBICON_AN_SENSE			0x0100	/* Auto-negotiation sense enable */
#define TBICON_CLK_SELECT		0x0020	/* Clock select */
#define TBICON_MI_MODE			0x0010	/* GMII mode (TBI if not set) */

/* TBI auto-negotiation advertisement values */
#define TBIANA_SGMII			0x4001
#define TBIANA_1000X			0x01a0

/* Interrupt Mask Register (IMASK) bit definitions; the same bit layout
 * applies to the IEVENT register (see dtsec_isr()).
 */
#define DTSEC_IMASK_BREN		0x80000000
#define DTSEC_IMASK_RXCEN		0x40000000
#define DTSEC_IMASK_MSROEN		0x04000000
#define DTSEC_IMASK_GTSCEN		0x02000000
#define DTSEC_IMASK_BTEN		0x01000000
#define DTSEC_IMASK_TXCEN		0x00800000
#define DTSEC_IMASK_TXEEN		0x00400000
#define DTSEC_IMASK_LCEN		0x00040000
#define DTSEC_IMASK_CRLEN		0x00020000
#define DTSEC_IMASK_XFUNEN		0x00010000
#define DTSEC_IMASK_ABRTEN		0x00008000
#define DTSEC_IMASK_IFERREN		0x00004000
#define DTSEC_IMASK_MAGEN		0x00000800
#define DTSEC_IMASK_MMRDEN		0x00000400
#define DTSEC_IMASK_MMWREN		0x00000200
#define DTSEC_IMASK_GRSCEN		0x00000100
#define DTSEC_IMASK_TDPEEN		0x00000002
#define DTSEC_IMASK_RDPEEN		0x00000001

/* Union of the event bits the driver cares about.  Note the MDIO
 * completion bits (MMRDEN/MMWREN) are deliberately absent; dtsec_isr()
 * also masks them out explicitly.
 */
#define DTSEC_EVENTS_MASK		\
	((u32)(DTSEC_IMASK_BREN    |	\
	       DTSEC_IMASK_RXCEN   |	\
	       DTSEC_IMASK_BTEN    |	\
	       DTSEC_IMASK_TXCEN   |	\
	       DTSEC_IMASK_TXEEN   |	\
	       DTSEC_IMASK_ABRTEN  |	\
	       DTSEC_IMASK_LCEN    |	\
	       DTSEC_IMASK_CRLEN   |	\
	       DTSEC_IMASK_XFUNEN  |	\
	       DTSEC_IMASK_IFERREN |	\
	       DTSEC_IMASK_MAGEN   |	\
	       DTSEC_IMASK_TDPEEN  |	\
	       DTSEC_IMASK_RDPEEN))

/* dtsec timestamp event bits */
#define TMR_PEMASK_TSREEN		0x00010000
#define TMR_PEVENT_TSRE			0x00010000

/* Group address bit indication */
#define MAC_GROUP_ADDRESS		0x0000010000000000ULL

/* Defaults — consumed by set_dflts() */
#define DEFAULT_HALFDUP_RETRANSMIT	0xf
#define DEFAULT_HALFDUP_COLL_WINDOW	0x37
#define DEFAULT_TX_PAUSE_TIME		0xf000
#define DEFAULT_RX_PREPEND		0
#define DEFAULT_PREAMBLE_LEN		7
#define DEFAULT_TX_PAUSE_TIME_EXTD	0
#define DEFAULT_NON_BACK_TO_BACK_IPG1	0x40
#define DEFAULT_NON_BACK_TO_BACK_IPG2	0x60
#define DEFAULT_MIN_IFG_ENFORCEMENT	0x50
#define DEFAULT_BACK_TO_BACK_IPG	0x60
#define DEFAULT_MAXIMUM_FRAME		0x600

/* register related defines (bits, field offsets..) */
#define DTSEC_ID2_INT_REDUCED_OFF	0x00010000

#define DTSEC_ECNTRL_GMIIM		0x00000040
#define DTSEC_ECNTRL_TBIM		0x00000020
#define DTSEC_ECNTRL_SGMIIM		0x00000002
#define DTSEC_ECNTRL_RPM		0x00000010
#define DTSEC_ECNTRL_R100M		0x00000008
#define DTSEC_ECNTRL_QSGMIIM		0x00000001

#define DTSEC_TCTRL_GTS			0x00000020

#define RCTRL_PAL_MASK			0x001f0000
#define RCTRL_PAL_SHIFT			16
#define RCTRL_GHTX			0x00000400
#define RCTRL_GRS			0x00000020
#define RCTRL_MPROM			0x00000008
#define RCTRL_RSF			0x00000004
#define RCTRL_UPROM			0x00000001

#define MACCFG1_SOFT_RESET		0x80000000
#define MACCFG1_RX_FLOW			0x00000020
#define MACCFG1_TX_FLOW			0x00000010
#define MACCFG1_TX_EN			0x00000001
#define MACCFG1_RX_EN			0x00000004

#define MACCFG2_NIBBLE_MODE		0x00000100
#define MACCFG2_BYTE_MODE		0x00000200
#define MACCFG2_PAD_CRC_EN		0x00000004
#define MACCFG2_FULL_DUPLEX		0x00000001
#define MACCFG2_PREAMBLE_LENGTH_MASK	0x0000f000
#define MACCFG2_PREAMBLE_LENGTH_SHIFT	12

/* IPGIFG field shifts and masks — used to assemble the IPG/IFG register
 * in init()
 */
#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT	24
#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT	16
#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT	8

#define IPGIFG_NON_BACK_TO_BACK_IPG_1	0x7F000000
#define IPGIFG_NON_BACK_TO_BACK_IPG_2	0x007F0000
#define IPGIFG_MIN_IFG_ENFORCEMENT	0x0000FF00
#define IPGIFG_BACK_TO_BACK_IPG		0x0000007F

#define HAFDUP_EXCESS_DEFER		0x00010000
#define HAFDUP_COLLISION_WINDOW		0x000003ff
#define HAFDUP_RETRANSMISSION_MAX_SHIFT	12
#define HAFDUP_RETRANSMISSION_MAX	0x0000f000

#define NUM_OF_HASH_REGS		8	/* Number of hash table registers */

#define PTV_PTE_MASK			0xffff0000
#define PTV_PT_MASK			0x0000ffff
#define PTV_PTE_SHIFT			16

/* Upper bounds enforced by check_init_parameters() */
#define MAX_PACKET_ALIGNMENT		31
#define MAX_INTER_PACKET_GAP		0x7f
#define MAX_RETRANSMISSION		0x0f
#define MAX_COLLISION_WINDOW		0x03ff

/* Hash table size (32 bits*8 regs) */
#define DTSEC_HASH_TABLE_SIZE		256
/* Extended Hash table size (32 bits*16 regs) */
#define EXTENDED_HASH_TABLE_SIZE	512
/* dTSEC Memory Map registers
 *
 * This struct mirrors the hardware register block byte-for-byte (offsets
 * noted per field).  Field order, widths and the reserved padding arrays
 * define hardware offsets — do NOT reorder or resize anything here.
 * All accesses go through ioread32be()/iowrite32be() (big-endian block).
 */
struct dtsec_regs {
	/* dTSEC General Control and Status Registers */
	u32 tsec_id;		/* 0x000 ETSEC_ID register */
	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
	u32 ievent;		/* 0x008 Interrupt event register */
	u32 imask;		/* 0x00C Interrupt mask register */
	u32 reserved0010[1];
	u32 ecntrl;		/* 0x014 E control register */
	u32 ptv;		/* 0x018 Pause time value register */
	u32 tbipa;		/* 0x01C TBI PHY address register */
	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
	u32 tmr_pemask;		/* 0x028 Timer event mask register */
	u32 reserved002c[5];
	u32 tctrl;		/* 0x040 Transmit control register */
	u32 reserved0044[3];
	u32 rctrl;		/* 0x050 Receive control register */
	u32 reserved0054[11];
	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
	u32 reserved00c0[16];
	u32 maccfg1;		/* 0x100 MAC configuration #1 */
	u32 maccfg2;		/* 0x104 MAC configuration #2 */
	u32 ipgifg;		/* 0x108 IPG/IFG */
	u32 hafdup;		/* 0x10C Half-duplex */
	u32 maxfrm;		/* 0x110 Maximum frame */
	u32 reserved0114[10];
	u32 ifstat;		/* 0x13C Interface status */
	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
	struct {
		u32 exact_match1;	/* octets 1-4 */
		u32 exact_match2;	/* octets 5-6 */
	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
	u32 reserved01c0[16];
	/* MIB statistics counters, 0x200 onwards */
	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
	u32 trmgv;
	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
	u32 rbyt;	/* 0x21C receive byte counter */
	u32 rpkt;	/* 0x220 receive packet counter */
	u32 rfcs;	/* 0x224 receive FCS error counter */
	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
	u32 rbca;	/* 0x22C Rx broadcast packet counter */
	u32 rxcf;	/* 0x230 Rx control frame packet counter */
	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
	u32 raln;	/* 0x23C Rx alignment error counter */
	u32 rflr;	/* 0x240 Rx frame length error counter */
	u32 rcde;	/* 0x244 Rx code error counter */
	u32 rcse;	/* 0x248 Rx carrier sense error counter */
	u32 rund;	/* 0x24C Rx undersize packet counter */
	u32 rovr;	/* 0x250 Rx oversize packet counter */
	u32 rfrg;	/* 0x254 Rx fragments counter */
	u32 rjbr;	/* 0x258 Rx jabber counter */
	u32 rdrp;	/* 0x25C Rx drop */
	u32 tbyt;	/* 0x260 Tx byte counter */
	u32 tpkt;	/* 0x264 Tx packet counter */
	u32 tmca;	/* 0x268 Tx multicast packet counter */
	u32 tbca;	/* 0x26C Tx broadcast packet counter */
	u32 txpf;	/* 0x270 Tx pause control frame counter */
	u32 tdfr;	/* 0x274 Tx deferral packet counter */
	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
	u32 tscl;	/* 0x27C Tx single collision packet counter */
	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
	u32 tlcl;	/* 0x284 Tx late collision packet counter */
	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
	u32 tncl;	/* 0x28C Tx total collision counter */
	u32 reserved0290[1];
	u32 tdrp;	/* 0x294 Tx drop frame counter */
	u32 tjbr;	/* 0x298 Tx jabber frame counter */
	u32 tfcs;	/* 0x29C Tx FCS error counter */
	u32 txcf;	/* 0x2A0 Tx control frame counter */
	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
	u32 tund;	/* 0x2A8 Tx undersize frame counter */
	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
	u32 car1;	/* 0x2B0 carry register one register* */
	u32 car2;	/* 0x2B4 carry register two register* */
	u32 cam1;	/* 0x2B8 carry register one mask register */
	u32 cam2;	/* 0x2BC carry register two mask register */
	/* NOTE: dtsec_isr() reaches into reserved02c0[27] (offset 0x32C)
	 * as part of an errata workaround on rev-2 hardware.
	 */
	u32 reserved02c0[848];
};
/* struct dtsec_cfg - dTSEC configuration
 *
 * halfdup_retransmit:
 *	Number of retransmission attempts following a collision.
 *	If this is exceeded dTSEC aborts transmission due to excessive
 *	collisions. The standard specifies the attempt limit to be 15.
 * halfdup_coll_window:
 *	The number of bytes of the frame during which collisions may occur.
 *	The default value of 55 corresponds to the frame byte at the end of
 *	the standard 512-bit slot time window. If collisions are detected
 *	after this byte, the late collision event is asserted and
 *	transmission of current frame is aborted.
 * tx_pad_crc:
 *	Pad and append CRC. If set, the MAC pads all transmitted short
 *	frames and appends a CRC to every frame regardless of padding
 *	requirement.
 * tx_pause_time:
 *	Transmit pause time value. This pause value is used as part of the
 *	pause frame to be sent when a transmit pause frame is initiated.
 *	If set to 0 this disables transmission of pause frames.
 * preamble_len:
 *	Length, in bytes, of the preamble field preceding each Ethernet
 *	start-of-frame delimiter byte. The default value of 0x7 should be
 *	used in order to guarantee reliable operation with IEEE 802.3
 *	compliant hardware.
 * rx_prepend:
 *	Packet alignment padding length. The specified number of bytes
 *	(1-31) of zero padding are inserted before the start of each
 *	received frame. For Ethernet, where optional preamble extraction is
 *	enabled, the padding appears before the preamble, otherwise the
 *	padding precedes the layer 2 header.
 *
 * This structure contains basic dTSEC configuration and must be passed to
 * init() function. A default set of configuration values can be
 * obtained by calling set_dflts().
 */
struct dtsec_cfg {
	u16 halfdup_retransmit;
	u16 halfdup_coll_window;
	bool tx_pad_crc;
	u16 tx_pause_time;
	bool ptp_tsu_en;	/* gates TMR_PEVENT setup in init() */
	bool ptp_exception_en;	/* gates TMR_PEMASK setup in init() */
	u32 preamble_len;
	u32 rx_prepend;
	u16 tx_pause_time_extd;	/* extended pause value (PTV[PTE] field) */
	u16 maximum_frame;	/* written to MAXFRM by init() */
	u32 non_back_to_back_ipg1;
	u32 non_back_to_back_ipg2;
	u32 min_ifg_enforcement;
	u32 back_to_back_ipg;
};
/* Per-MAC driver state for one dTSEC instance. */
struct fman_mac {
	/* pointer to dTSEC memory mapped registers */
	struct dtsec_regs __iomem *regs;
	/* MAC address of device */
	u64 addr;
	/* Ethernet physical interface */
	phy_interface_t phy_if;
	u16 max_speed;
	void *dev_id; /* device cookie used by the exception cbs */
	fman_mac_exception_cb *exception_cb;
	fman_mac_exception_cb *event_cb;
	/* Number of individual addresses in registers for this station */
	u8 num_of_ind_addr_in_regs;
	/* pointer to driver's global address hash table */
	struct eth_hash_t *multicast_addr_hash;
	/* pointer to driver's individual address hash table */
	struct eth_hash_t *unicast_addr_hash;
	/* MAC index, passed to fman_reset_mac() during errata recovery */
	u8 mac_id;
	u32 exceptions;
	bool ptp_tsu_enabled;
	/* NOTE(review): name carries a typo ("exeption") — kept as-is
	 * because it is likely referenced elsewhere; confirm before fixing.
	 */
	bool en_tsu_err_exeption;
	/* configuration used by init(); presumably freed once init
	 * completes — is_init_done() treats NULL as "initialized"
	 */
	struct dtsec_cfg *dtsec_drv_param;
	void *fm;
	struct fman_rev_info fm_rev_info;
	bool basex_if;
	struct phy_device *tbiphy;
};
  320. static void set_dflts(struct dtsec_cfg *cfg)
  321. {
  322. cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
  323. cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
  324. cfg->tx_pad_crc = true;
  325. cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
  326. /* PHY address 0 is reserved (DPAA RM) */
  327. cfg->rx_prepend = DEFAULT_RX_PREPEND;
  328. cfg->ptp_tsu_en = true;
  329. cfg->ptp_exception_en = true;
  330. cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
  331. cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
  332. cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
  333. cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
  334. cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
  335. cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
  336. cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
  337. }
/* init - program the dTSEC register block from a validated configuration
 *
 * @regs:           memory-mapped dTSEC registers
 * @cfg:            driver configuration (see struct dtsec_cfg)
 * @iface:          PHY interface mode; rejected with -EINVAL when ID2
 *                  reports the reduced-pin interface is absent
 * @iface_speed:    link speed (selects nibble/byte mode and R100M)
 * @macaddr:        6-byte station MAC address
 * @exception_mask: initial IMASK value
 * @tbi_addr:       PHY address written to TBIPA
 *
 * Returns 0 on success, -EINVAL if the interface mode is unsupported.
 * NOTE: the exact write ordering below (soft reset first, IEVENT cleared
 * after IMASK, hash tables last) is preserved deliberately.
 */
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
		phy_interface_t iface, u16 iface_speed, u8 *macaddr,
		u32 exception_mask, u8 tbi_addr)
{
	bool is_rgmii, is_sgmii, is_qsgmii;
	int i;
	u32 tmp;

	/* Soft reset */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/* dtsec_id2 */
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support */
	if (iface == PHY_INTERFACE_MODE_RGMII ||
	    iface == PHY_INTERFACE_MODE_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	if (iface == PHY_INTERFACE_MODE_SGMII ||
	    iface == PHY_INTERFACE_MODE_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

	/* Build ECNTRL from the interface mode and speed */
	tmp = 0;
	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;
	iowrite32be(tmp, &regs->ecntrl);

	/* Pause time value (PT and extended PTE fields) */
	tmp = 0;
	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
	iowrite32be(tmp, &regs->ptv);

	/* Receive control: packet alignment padding + accept short frames */
	tmp = 0;
	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
	/* Accept short frames */
	tmp |= RCTRL_RSF;
	iowrite32be(tmp, &regs->rctrl);

	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(tbi_addr, &regs->tbipa);

	iowrite32be(0, &regs->tmr_ctrl);

	/* Optional 1588 timestamp event/mask setup */
	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	/* Enable Rx/Tx flow control (Rx/Tx enable bits are NOT set here) */
	tmp = 0;
	tmp |= MACCFG1_RX_FLOW;
	tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	/* MACCFG2: interface width, preamble, padding/CRC, duplex */
	tmp = 0;
	if (iface_speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;

	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
		MACCFG2_PREAMBLE_LENGTH_MASK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	/* Assemble the four IPG/IFG fields into one register value */
	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
	       | ((cfg->non_back_to_back_ipg2 <<
		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		  & IPGIFG_NON_BACK_TO_BACK_IPG_2)
	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		  & IPGIFG_MIN_IFG_ENFORCEMENT)
	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	/* Half-duplex parameters: excess defer + retransmit/collision limits */
	tmp = 0;
	tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
	iowrite32be(tmp, &regs->hafdup);

	/* Initialize Maximum frame length */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	/* Mask all MIB carry bits */
	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	iowrite32be(exception_mask, &regs->imask);

	/* Clear any stale interrupt events */
	iowrite32be(0xffffffff, &regs->ievent);

	/* Station address, stored byte-reversed across two registers */
	tmp = (u32)((macaddr[5] << 24) |
		    (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
	iowrite32be(tmp, &regs->macstnaddr1);

	tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
	iowrite32be(tmp, &regs->macstnaddr2);

	/* HASH */
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	return 0;
}
  454. static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
  455. {
  456. u32 tmp;
  457. tmp = (u32)((adr[5] << 24) |
  458. (adr[4] << 16) | (adr[3] << 8) | adr[2]);
  459. iowrite32be(tmp, &regs->macstnaddr1);
  460. tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
  461. iowrite32be(tmp, &regs->macstnaddr2);
  462. }
  463. static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
  464. bool enable)
  465. {
  466. int reg_idx = (bucket >> 5) & 0xf;
  467. int bit_idx = bucket & 0x1f;
  468. u32 bit_mask = 0x80000000 >> bit_idx;
  469. u32 __iomem *reg;
  470. if (reg_idx > 7)
  471. reg = &regs->gaddr[reg_idx - 8];
  472. else
  473. reg = &regs->igaddr[reg_idx];
  474. if (enable)
  475. iowrite32be(ioread32be(reg) | bit_mask, reg);
  476. else
  477. iowrite32be(ioread32be(reg) & (~bit_mask), reg);
  478. }
  479. static int check_init_parameters(struct fman_mac *dtsec)
  480. {
  481. if (dtsec->max_speed >= SPEED_10000) {
  482. pr_err("1G MAC driver supports 1G or lower speeds\n");
  483. return -EINVAL;
  484. }
  485. if (dtsec->addr == 0) {
  486. pr_err("Ethernet MAC Must have a valid MAC Address\n");
  487. return -EINVAL;
  488. }
  489. if ((dtsec->dtsec_drv_param)->rx_prepend >
  490. MAX_PACKET_ALIGNMENT) {
  491. pr_err("packetAlignmentPadding can't be > than %d\n",
  492. MAX_PACKET_ALIGNMENT);
  493. return -EINVAL;
  494. }
  495. if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
  496. MAX_INTER_PACKET_GAP) ||
  497. ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
  498. MAX_INTER_PACKET_GAP) ||
  499. ((dtsec->dtsec_drv_param)->back_to_back_ipg >
  500. MAX_INTER_PACKET_GAP)) {
  501. pr_err("Inter packet gap can't be greater than %d\n",
  502. MAX_INTER_PACKET_GAP);
  503. return -EINVAL;
  504. }
  505. if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
  506. MAX_RETRANSMISSION) {
  507. pr_err("maxRetransmission can't be greater than %d\n",
  508. MAX_RETRANSMISSION);
  509. return -EINVAL;
  510. }
  511. if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
  512. MAX_COLLISION_WINDOW) {
  513. pr_err("collisionWindow can't be greater than %d\n",
  514. MAX_COLLISION_WINDOW);
  515. return -EINVAL;
  516. /* If Auto negotiation process is disabled, need to set up the PHY
  517. * using the MII Management Interface
  518. */
  519. }
  520. if (!dtsec->exception_cb) {
  521. pr_err("uninitialized exception_cb\n");
  522. return -EINVAL;
  523. }
  524. if (!dtsec->event_cb) {
  525. pr_err("uninitialized event_cb\n");
  526. return -EINVAL;
  527. }
  528. return 0;
  529. }
  530. static int get_exception_flag(enum fman_mac_exceptions exception)
  531. {
  532. u32 bit_mask;
  533. switch (exception) {
  534. case FM_MAC_EX_1G_BAB_RX:
  535. bit_mask = DTSEC_IMASK_BREN;
  536. break;
  537. case FM_MAC_EX_1G_RX_CTL:
  538. bit_mask = DTSEC_IMASK_RXCEN;
  539. break;
  540. case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
  541. bit_mask = DTSEC_IMASK_GTSCEN;
  542. break;
  543. case FM_MAC_EX_1G_BAB_TX:
  544. bit_mask = DTSEC_IMASK_BTEN;
  545. break;
  546. case FM_MAC_EX_1G_TX_CTL:
  547. bit_mask = DTSEC_IMASK_TXCEN;
  548. break;
  549. case FM_MAC_EX_1G_TX_ERR:
  550. bit_mask = DTSEC_IMASK_TXEEN;
  551. break;
  552. case FM_MAC_EX_1G_LATE_COL:
  553. bit_mask = DTSEC_IMASK_LCEN;
  554. break;
  555. case FM_MAC_EX_1G_COL_RET_LMT:
  556. bit_mask = DTSEC_IMASK_CRLEN;
  557. break;
  558. case FM_MAC_EX_1G_TX_FIFO_UNDRN:
  559. bit_mask = DTSEC_IMASK_XFUNEN;
  560. break;
  561. case FM_MAC_EX_1G_MAG_PCKT:
  562. bit_mask = DTSEC_IMASK_MAGEN;
  563. break;
  564. case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
  565. bit_mask = DTSEC_IMASK_MMRDEN;
  566. break;
  567. case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
  568. bit_mask = DTSEC_IMASK_MMWREN;
  569. break;
  570. case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
  571. bit_mask = DTSEC_IMASK_GRSCEN;
  572. break;
  573. case FM_MAC_EX_1G_DATA_ERR:
  574. bit_mask = DTSEC_IMASK_TDPEEN;
  575. break;
  576. case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
  577. bit_mask = DTSEC_IMASK_MSROEN;
  578. break;
  579. default:
  580. bit_mask = 0;
  581. break;
  582. }
  583. return bit_mask;
  584. }
  585. static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
  586. {
  587. /* Checks if dTSEC driver parameters were initialized */
  588. if (!dtsec_drv_params)
  589. return true;
  590. return false;
  591. }
  592. static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
  593. {
  594. struct dtsec_regs __iomem *regs = dtsec->regs;
  595. if (is_init_done(dtsec->dtsec_drv_param))
  596. return 0;
  597. return (u16)ioread32be(&regs->maxfrm);
  598. }
  599. static void dtsec_isr(void *handle)
  600. {
  601. struct fman_mac *dtsec = (struct fman_mac *)handle;
  602. struct dtsec_regs __iomem *regs = dtsec->regs;
  603. u32 event;
  604. /* do not handle MDIO events */
  605. event = ioread32be(&regs->ievent) &
  606. (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
  607. event &= ioread32be(&regs->imask);
  608. iowrite32be(event, &regs->ievent);
  609. if (event & DTSEC_IMASK_BREN)
  610. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
  611. if (event & DTSEC_IMASK_RXCEN)
  612. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
  613. if (event & DTSEC_IMASK_GTSCEN)
  614. dtsec->exception_cb(dtsec->dev_id,
  615. FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
  616. if (event & DTSEC_IMASK_BTEN)
  617. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
  618. if (event & DTSEC_IMASK_TXCEN)
  619. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
  620. if (event & DTSEC_IMASK_TXEEN)
  621. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
  622. if (event & DTSEC_IMASK_LCEN)
  623. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
  624. if (event & DTSEC_IMASK_CRLEN)
  625. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
  626. if (event & DTSEC_IMASK_XFUNEN) {
  627. /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
  628. if (dtsec->fm_rev_info.major == 2) {
  629. u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
  630. /* a. Write 0x00E0_0C00 to DTSEC_ID
  631. * This is a read only register
  632. * b. Read and save the value of TPKT
  633. */
  634. tpkt1 = ioread32be(&regs->tpkt);
  635. /* c. Read the register at dTSEC address offset 0x32C */
  636. tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
  637. /* d. Compare bits [9:15] to bits [25:31] of the
  638. * register at address offset 0x32C.
  639. */
  640. if ((tmp_reg1 & 0x007F0000) !=
  641. (tmp_reg1 & 0x0000007F)) {
  642. /* If they are not equal, save the value of
  643. * this register and wait for at least
  644. * MAXFRM*16 ns
  645. */
  646. usleep_range((u32)(min
  647. (dtsec_get_max_frame_length(dtsec) *
  648. 16 / 1000, 1)), (u32)
  649. (min(dtsec_get_max_frame_length
  650. (dtsec) * 16 / 1000, 1) + 1));
  651. }
  652. /* e. Read and save TPKT again and read the register
  653. * at dTSEC address offset 0x32C again
  654. */
  655. tpkt2 = ioread32be(&regs->tpkt);
  656. tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
  657. /* f. Compare the value of TPKT saved in step b to
  658. * value read in step e. Also compare bits [9:15] of
  659. * the register at offset 0x32C saved in step d to the
  660. * value of bits [9:15] saved in step e. If the two
  661. * registers values are unchanged, then the transmit
  662. * portion of the dTSEC controller is locked up and
  663. * the user should proceed to the recover sequence.
  664. */
  665. if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
  666. (tmp_reg2 & 0x007F0000))) {
  667. /* recover sequence */
  668. /* a.Write a 1 to RCTRL[GRS] */
  669. iowrite32be(ioread32be(&regs->rctrl) |
  670. RCTRL_GRS, &regs->rctrl);
  671. /* b.Wait until IEVENT[GRSC]=1, or at least
  672. * 100 us has elapsed.
  673. */
  674. for (i = 0; i < 100; i++) {
  675. if (ioread32be(&regs->ievent) &
  676. DTSEC_IMASK_GRSCEN)
  677. break;
  678. udelay(1);
  679. }
  680. if (ioread32be(&regs->ievent) &
  681. DTSEC_IMASK_GRSCEN)
  682. iowrite32be(DTSEC_IMASK_GRSCEN,
  683. &regs->ievent);
  684. else
  685. pr_debug("Rx lockup due to Tx lockup\n");
  686. /* c.Write a 1 to bit n of FM_RSTC
  687. * (offset 0x0CC of FPM)
  688. */
  689. fman_reset_mac(dtsec->fm, dtsec->mac_id);
  690. /* d.Wait 4 Tx clocks (32 ns) */
  691. udelay(1);
  692. /* e.Write a 0 to bit n of FM_RSTC. */
  693. /* cleared by FMAN
  694. */
  695. }
  696. }
  697. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
  698. }
  699. if (event & DTSEC_IMASK_MAGEN)
  700. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
  701. if (event & DTSEC_IMASK_GRSCEN)
  702. dtsec->exception_cb(dtsec->dev_id,
  703. FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
  704. if (event & DTSEC_IMASK_TDPEEN)
  705. dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
  706. if (event & DTSEC_IMASK_RDPEEN)
  707. dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
  708. /* masked interrupts */
  709. WARN_ON(event & DTSEC_IMASK_ABRTEN);
  710. WARN_ON(event & DTSEC_IMASK_IFERREN);
  711. }
  712. static void dtsec_1588_isr(void *handle)
  713. {
  714. struct fman_mac *dtsec = (struct fman_mac *)handle;
  715. struct dtsec_regs __iomem *regs = dtsec->regs;
  716. u32 event;
  717. if (dtsec->ptp_tsu_enabled) {
  718. event = ioread32be(&regs->tmr_pevent);
  719. event &= ioread32be(&regs->tmr_pemask);
  720. if (event) {
  721. iowrite32be(event, &regs->tmr_pevent);
  722. WARN_ON(event & TMR_PEVENT_TSRE);
  723. dtsec->exception_cb(dtsec->dev_id,
  724. FM_MAC_EX_1G_1588_TS_RX_ERR);
  725. }
  726. }
  727. }
/* Undo the resource acquisition done at init time: detach the FPM
 * error/normal interrupt handlers and release both driver-side MAC
 * address hash tables (pointers are NULLed to make repeated calls safe).
 */
static void free_init_resources(struct fman_mac *dtsec)
{
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_ERR);
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_NORMAL);

	/* release the driver's group hash table */
	free_hash_table(dtsec->multicast_addr_hash);
	dtsec->multicast_addr_hash = NULL;

	/* release the driver's individual hash table */
	free_hash_table(dtsec->unicast_addr_hash);
	dtsec->unicast_addr_hash = NULL;
}
  741. int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
  742. {
  743. if (is_init_done(dtsec->dtsec_drv_param))
  744. return -EINVAL;
  745. dtsec->dtsec_drv_param->maximum_frame = new_val;
  746. return 0;
  747. }
  748. int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
  749. {
  750. if (is_init_done(dtsec->dtsec_drv_param))
  751. return -EINVAL;
  752. dtsec->dtsec_drv_param->tx_pad_crc = new_val;
  753. return 0;
  754. }
  755. int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
  756. {
  757. struct dtsec_regs __iomem *regs = dtsec->regs;
  758. u32 tmp;
  759. if (!is_init_done(dtsec->dtsec_drv_param))
  760. return -EINVAL;
  761. /* Enable */
  762. tmp = ioread32be(&regs->maccfg1);
  763. if (mode & COMM_MODE_RX)
  764. tmp |= MACCFG1_RX_EN;
  765. if (mode & COMM_MODE_TX)
  766. tmp |= MACCFG1_TX_EN;
  767. iowrite32be(tmp, &regs->maccfg1);
  768. /* Graceful start - clear the graceful receive stop bit */
  769. if (mode & COMM_MODE_TX)
  770. iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
  771. &regs->tctrl);
  772. if (mode & COMM_MODE_RX)
  773. iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
  774. &regs->rctrl);
  775. return 0;
  776. }
  777. int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
  778. {
  779. struct dtsec_regs __iomem *regs = dtsec->regs;
  780. u32 tmp;
  781. if (!is_init_done(dtsec->dtsec_drv_param))
  782. return -EINVAL;
  783. /* Gracefull stop - Assert the graceful transmit stop bit */
  784. if (mode & COMM_MODE_RX) {
  785. tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
  786. iowrite32be(tmp, &regs->rctrl);
  787. if (dtsec->fm_rev_info.major == 2)
  788. usleep_range(100, 200);
  789. else
  790. udelay(10);
  791. }
  792. if (mode & COMM_MODE_TX) {
  793. if (dtsec->fm_rev_info.major == 2)
  794. pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
  795. else
  796. pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
  797. }
  798. tmp = ioread32be(&regs->maccfg1);
  799. if (mode & COMM_MODE_RX)
  800. tmp &= ~MACCFG1_RX_EN;
  801. if (mode & COMM_MODE_TX)
  802. tmp &= ~MACCFG1_TX_EN;
  803. iowrite32be(tmp, &regs->maccfg1);
  804. return 0;
  805. }
  806. int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
  807. u8 __maybe_unused priority,
  808. u16 pause_time, u16 __maybe_unused thresh_time)
  809. {
  810. struct dtsec_regs __iomem *regs = dtsec->regs;
  811. u32 ptv = 0;
  812. if (!is_init_done(dtsec->dtsec_drv_param))
  813. return -EINVAL;
  814. if (pause_time) {
  815. /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
  816. if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
  817. pr_warn("pause-time: %d illegal.Should be > 320\n",
  818. pause_time);
  819. return -EINVAL;
  820. }
  821. ptv = ioread32be(&regs->ptv);
  822. ptv &= PTV_PTE_MASK;
  823. ptv |= pause_time & PTV_PT_MASK;
  824. iowrite32be(ptv, &regs->ptv);
  825. /* trigger the transmission of a flow-control pause frame */
  826. iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
  827. &regs->maccfg1);
  828. } else
  829. iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
  830. &regs->maccfg1);
  831. return 0;
  832. }
  833. int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
  834. {
  835. struct dtsec_regs __iomem *regs = dtsec->regs;
  836. u32 tmp;
  837. if (!is_init_done(dtsec->dtsec_drv_param))
  838. return -EINVAL;
  839. tmp = ioread32be(&regs->maccfg1);
  840. if (en)
  841. tmp |= MACCFG1_RX_FLOW;
  842. else
  843. tmp &= ~MACCFG1_RX_FLOW;
  844. iowrite32be(tmp, &regs->maccfg1);
  845. return 0;
  846. }
/* Change the MAC station address at runtime: cache the new address in
 * the driver state and program the MAC_STN_ADDR registers.
 */
int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
{
	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* Initialize MAC Station Address registers (1 & 2)
	 * Station address have to be swapped (big endian to little endian
	 */
	dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
	set_mac_address(dtsec->regs, (u8 *)(*enet_addr));

	return 0;
}
/* Add @eth_addr to the hardware hash filter and to the matching
 * driver-side hash list (multicast or unicast), so it can later be
 * removed with dtsec_del_hash_mac_address().
 *
 * The hash bucket is derived from the bit-reversed CRC-32 of the
 * address; its width depends on RCTRL[GHTX] (extended hash mode).
 * Returns 0, -EINVAL before init or for unicast-with-GHTX, -ENOMEM on
 * allocation failure.
 */
int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct eth_hash_entry *hash_entry;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	/* considering the 9 highest order bits in crc H[8:0]:
	 *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
	 *and H[5:1] (next 5 bits) identify the hash bit
	 *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
	 *and H[4:0] (next 5 bits) identify the hash bit.
	 *
	 *In bucket index output the low 5 bits identify the hash register
	 *bit, while the higher 4 bits identify the hash register
	 */
	if (ghtx) {
		/* extended hash: 9-bit bucket index */
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		/* legacy hash: 8-bit bucket index */
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set in gaddr instead of
		 *igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	set_bucket(dtsec->regs, bucket, true);

	/* Create element to be added to the driver hash table */
	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
	if (!hash_entry)
		return -ENOMEM;
	hash_entry->addr = addr;
	INIT_LIST_HEAD(&hash_entry->node);

	if (addr & MAC_GROUP_ADDRESS)
		/* Group Address */
		list_add_tail(&hash_entry->node,
			      &dtsec->multicast_addr_hash->lsts[bucket]);
	else
		list_add_tail(&hash_entry->node,
			      &dtsec->unicast_addr_hash->lsts[bucket]);

	return 0;
}
/* Remove @eth_addr from the driver-side hash list and, when the bucket
 * becomes empty, clear the corresponding hardware hash filter bit.
 * The bucket is computed exactly as in dtsec_add_hash_mac_address().
 * A WARN_ON fires if the address was never added.
 */
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct list_head *pos;
	struct eth_hash_entry *hash_entry = NULL;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	if (ghtx) {
		/* extended hash: 9-bit bucket index */
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		/* legacy hash: 8-bit bucket index */
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set
		 * in gaddr instead of igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	if (addr & MAC_GROUP_ADDRESS) {
		/* Group Address */
		list_for_each(pos,
			      &dtsec->multicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		/* last entry gone: drop the hardware filter bit too */
		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	} else {
		/* Individual Address */
		list_for_each(pos,
			      &dtsec->unicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		/* last entry gone: drop the hardware filter bit too */
		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	}

	/* address does not exist */
	WARN_ON(!hash_entry);

	return 0;
}
  975. int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
  976. {
  977. struct dtsec_regs __iomem *regs = dtsec->regs;
  978. u32 tmp;
  979. if (!is_init_done(dtsec->dtsec_drv_param))
  980. return -EINVAL;
  981. /* Set unicast promiscuous */
  982. tmp = ioread32be(&regs->rctrl);
  983. if (new_val)
  984. tmp |= RCTRL_UPROM;
  985. else
  986. tmp &= ~RCTRL_UPROM;
  987. iowrite32be(tmp, &regs->rctrl);
  988. /* Set multicast promiscuous */
  989. tmp = ioread32be(&regs->rctrl);
  990. if (new_val)
  991. tmp |= RCTRL_MPROM;
  992. else
  993. tmp &= ~RCTRL_MPROM;
  994. iowrite32be(tmp, &regs->rctrl);
  995. return 0;
  996. }
  997. int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
  998. {
  999. struct dtsec_regs __iomem *regs = dtsec->regs;
  1000. u32 tmp;
  1001. if (!is_init_done(dtsec->dtsec_drv_param))
  1002. return -EINVAL;
  1003. tmp = ioread32be(&regs->maccfg2);
  1004. /* Full Duplex */
  1005. tmp |= MACCFG2_FULL_DUPLEX;
  1006. tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
  1007. if (speed < SPEED_1000)
  1008. tmp |= MACCFG2_NIBBLE_MODE;
  1009. else if (speed == SPEED_1000)
  1010. tmp |= MACCFG2_BYTE_MODE;
  1011. iowrite32be(tmp, &regs->maccfg2);
  1012. tmp = ioread32be(&regs->ecntrl);
  1013. if (speed == SPEED_100)
  1014. tmp |= DTSEC_ECNTRL_R100M;
  1015. else
  1016. tmp &= ~DTSEC_ECNTRL_R100M;
  1017. iowrite32be(tmp, &regs->ecntrl);
  1018. return 0;
  1019. }
  1020. int dtsec_restart_autoneg(struct fman_mac *dtsec)
  1021. {
  1022. u16 tmp_reg16;
  1023. if (!is_init_done(dtsec->dtsec_drv_param))
  1024. return -EINVAL;
  1025. tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
  1026. tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
  1027. tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
  1028. BMCR_FULLDPLX | BMCR_SPEED1000);
  1029. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1030. return 0;
  1031. }
  1032. int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
  1033. {
  1034. struct dtsec_regs __iomem *regs = dtsec->regs;
  1035. if (!is_init_done(dtsec->dtsec_drv_param))
  1036. return -EINVAL;
  1037. *mac_version = ioread32be(&regs->tsec_id);
  1038. return 0;
  1039. }
/* Enable or disable reporting of a single MAC exception.
 * Regular exceptions are tracked in dtsec->exceptions and mirrored in
 * the IMASK register; FM_MAC_EX_1G_1588_TS_RX_ERR is special-cased and
 * controlled via TMR_PEMASK (valid only when the 1588 TSU is enabled).
 * Returns 0, or -EINVAL before init / for an unknown exception.
 */
int dtsec_set_exception(struct fman_mac *dtsec,
			enum fman_mac_exceptions exception, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 bit_mask = 0;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
		/* update the soft-state exception mask first */
		bit_mask = get_exception_flag(exception);
		if (bit_mask) {
			if (enable)
				dtsec->exceptions |= bit_mask;
			else
				dtsec->exceptions &= ~bit_mask;
		} else {
			pr_err("Undefined exception\n");
			return -EINVAL;
		}

		/* then mirror the change into the hardware IMASK */
		if (enable)
			iowrite32be(ioread32be(&regs->imask) | bit_mask,
				    &regs->imask);
		else
			iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
				    &regs->imask);
	} else {
		if (!dtsec->ptp_tsu_enabled) {
			pr_err("Exception valid for 1588 only\n");
			return -EINVAL;
		}
		switch (exception) {
		case FM_MAC_EX_1G_1588_TS_RX_ERR:
			if (enable) {
				dtsec->en_tsu_err_exeption = true;
				iowrite32be(ioread32be(&regs->tmr_pemask) |
					    TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			} else {
				dtsec->en_tsu_err_exeption = false;
				iowrite32be(ioread32be(&regs->tmr_pemask) &
					    ~TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			}
			break;
		default:
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
	}

	return 0;
}
/* Bring up the dTSEC MAC using the configuration accumulated in
 * dtsec->dtsec_drv_param: program the hardware, configure the TBI PHY
 * for SGMII, publish the max frame length to the FMan, allocate both
 * address hash tables and register the interrupt handlers. On success
 * the config struct is freed and NULLed, which makes is_init_done()
 * true and locks out further dtsec_cfg_*() calls.
 */
int dtsec_init(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct dtsec_cfg *dtsec_drv_param;
	int err;
	u16 max_frm_ln;
	enet_addr_t eth_addr;

	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (DEFAULT_RESET_ON_INIT &&
	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
		pr_err("Can't reset MAC!\n");
		return -EINVAL;
	}

	err = check_init_parameters(dtsec);
	if (err)
		return err;

	dtsec_drv_param = dtsec->dtsec_drv_param;

	/* program all MAC registers from the cached address + config */
	MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
		   dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
		   dtsec->tbiphy->mdio.addr);
	if (err) {
		free_init_resources(dtsec);
		pr_err("DTSEC version doesn't support this i/f mode\n");
		return err;
	}

	if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
		u16 tmp_reg16;

		/* Configure the TBI PHY Control Register */
		tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		/* release the soft reset, keep the clock selection */
		tmp_reg16 = TBICON_CLK_SELECT;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		/* reset the PHY with AN, full duplex, 1G selected */
		tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

		/* advertise 1000BASE-X or SGMII per the configured mode */
		if (dtsec->basex_if)
			tmp_reg16 = TBIANA_1000X;
		else
			tmp_reg16 = TBIANA_SGMII;
		phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

		/* restart auto-negotiation with the new advertisement */
		tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
	}

	/* Max Frame Length */
	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
	if (err) {
		pr_err("Setting max frame length failed\n");
		free_init_resources(dtsec);
		return -EINVAL;
	}

	dtsec->multicast_addr_hash =
	alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
	if (!dtsec->multicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("MC hash table is failed\n");
		return -ENOMEM;
	}

	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
	if (!dtsec->unicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("UC hash table is failed\n");
		return -ENOMEM;
	}

	/* register err intr handler for dtsec to FPM (err) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
	/* register 1588 intr handler for TMR to FPM (normal) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);

	/* init done: free the config and NULL it so is_init_done() flips */
	kfree(dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;

	return 0;
}
/* Tear down the MAC object: release init-time resources, free any
 * remaining config struct and the fman_mac itself. Always returns 0.
 */
int dtsec_free(struct fman_mac *dtsec)
{
	free_init_resources(dtsec);

	/* NULL if init completed; kfree(NULL) is a no-op otherwise */
	kfree(dtsec->dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;
	kfree(dtsec);

	return 0;
}
/* Allocate and pre-configure a dTSEC MAC object from @params.
 * Fills in default driver parameters, caches the MAC address, callbacks
 * and FMan handle, and resolves the internal TBI PHY device.
 * Returns the new object, or NULL on any failure (all partial
 * allocations are released). dtsec_init() must be called afterwards.
 */
struct fman_mac *dtsec_config(struct fman_mac_params *params)
{
	struct fman_mac *dtsec;
	struct dtsec_cfg *dtsec_drv_param;
	void __iomem *base_addr;

	base_addr = params->base_addr;

	/* allocate memory for the dTSEC MAC data structure */
	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
	if (!dtsec)
		return NULL;

	/* allocate memory for the d_tsec driver parameters data structure. */
	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
	if (!dtsec_drv_param)
		goto err_dtsec;

	/* Plant parameter structure pointer */
	dtsec->dtsec_drv_param = dtsec_drv_param;

	set_dflts(dtsec_drv_param);

	dtsec->regs = base_addr;
	dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
	dtsec->max_speed = params->max_speed;
	dtsec->phy_if = params->phy_if;
	dtsec->mac_id = params->mac_id;
	/* default set of exceptions forwarded to the callback */
	dtsec->exceptions = (DTSEC_IMASK_BREN |
			     DTSEC_IMASK_RXCEN |
			     DTSEC_IMASK_BTEN |
			     DTSEC_IMASK_TXCEN |
			     DTSEC_IMASK_TXEEN |
			     DTSEC_IMASK_ABRTEN |
			     DTSEC_IMASK_LCEN |
			     DTSEC_IMASK_CRLEN |
			     DTSEC_IMASK_XFUNEN |
			     DTSEC_IMASK_IFERREN |
			     DTSEC_IMASK_MAGEN |
			     DTSEC_IMASK_TDPEEN |
			     DTSEC_IMASK_RDPEEN);
	dtsec->exception_cb = params->exception_cb;
	dtsec->event_cb = params->event_cb;
	dtsec->dev_id = params->dev_id;
	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
	dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;

	dtsec->fm = params->fm;
	dtsec->basex_if = params->basex_if;

	if (!params->internal_phy_node) {
		pr_err("TBI PHY node is not available\n");
		goto err_dtsec_drv_param;
	}

	dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
	if (!dtsec->tbiphy) {
		pr_err("of_phy_find_device (TBI PHY) failed\n");
		goto err_dtsec_drv_param;
	}

	/* NOTE(review): the device reference is dropped immediately;
	 * presumably the PHY's lifetime is guaranteed by the OF node -
	 * confirm against the platform binding.
	 */
	put_device(&dtsec->tbiphy->mdio.dev);

	/* Save FMan revision */
	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

	return dtsec;

err_dtsec_drv_param:
	kfree(dtsec_drv_param);
err_dtsec:
	kfree(dtsec);
	return NULL;
}