/*
 * fman_dtsec.c - Freescale DPAA Frame Manager dTSEC (1 Gb/s MAC) driver
 */
  1. /*
  2. * Copyright 2008-2015 Freescale Semiconductor Inc.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions are met:
  6. * * Redistributions of source code must retain the above copyright
  7. * notice, this list of conditions and the following disclaimer.
  8. * * Redistributions in binary form must reproduce the above copyright
  9. * notice, this list of conditions and the following disclaimer in the
  10. * documentation and/or other materials provided with the distribution.
  11. * * Neither the name of Freescale Semiconductor nor the
  12. * names of its contributors may be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. *
  16. * ALTERNATIVELY, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") as published by the Free Software
  18. * Foundation, either version 2 of that License or (at your option) any
  19. * later version.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  22. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  23. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  24. * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  25. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  26. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  29. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  30. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33. #include "fman_dtsec.h"
  34. #include "fman.h"
  35. #include <linux/slab.h>
  36. #include <linux/bitrev.h>
  37. #include <linux/io.h>
  38. #include <linux/delay.h>
  39. #include <linux/phy.h>
  40. #include <linux/crc32.h>
  41. #include <linux/of_mdio.h>
  42. #include <linux/mii.h>
  43. /* TBI register addresses */
  44. #define MII_TBICON 0x11
  45. /* TBICON register bit fields */
  46. #define TBICON_SOFT_RESET 0x8000 /* Soft reset */
  47. #define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
  48. #define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
  49. #define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
  50. #define TBICON_CLK_SELECT 0x0020 /* Clock select */
  51. #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
  52. #define TBIANA_SGMII 0x4001
  53. #define TBIANA_1000X 0x01a0
  54. /* Interrupt Mask Register (IMASK) */
  55. #define DTSEC_IMASK_BREN 0x80000000
  56. #define DTSEC_IMASK_RXCEN 0x40000000
  57. #define DTSEC_IMASK_MSROEN 0x04000000
  58. #define DTSEC_IMASK_GTSCEN 0x02000000
  59. #define DTSEC_IMASK_BTEN 0x01000000
  60. #define DTSEC_IMASK_TXCEN 0x00800000
  61. #define DTSEC_IMASK_TXEEN 0x00400000
  62. #define DTSEC_IMASK_LCEN 0x00040000
  63. #define DTSEC_IMASK_CRLEN 0x00020000
  64. #define DTSEC_IMASK_XFUNEN 0x00010000
  65. #define DTSEC_IMASK_ABRTEN 0x00008000
  66. #define DTSEC_IMASK_IFERREN 0x00004000
  67. #define DTSEC_IMASK_MAGEN 0x00000800
  68. #define DTSEC_IMASK_MMRDEN 0x00000400
  69. #define DTSEC_IMASK_MMWREN 0x00000200
  70. #define DTSEC_IMASK_GRSCEN 0x00000100
  71. #define DTSEC_IMASK_TDPEEN 0x00000002
  72. #define DTSEC_IMASK_RDPEEN 0x00000001
  73. #define DTSEC_EVENTS_MASK \
  74. ((u32)(DTSEC_IMASK_BREN | \
  75. DTSEC_IMASK_RXCEN | \
  76. DTSEC_IMASK_BTEN | \
  77. DTSEC_IMASK_TXCEN | \
  78. DTSEC_IMASK_TXEEN | \
  79. DTSEC_IMASK_ABRTEN | \
  80. DTSEC_IMASK_LCEN | \
  81. DTSEC_IMASK_CRLEN | \
  82. DTSEC_IMASK_XFUNEN | \
  83. DTSEC_IMASK_IFERREN | \
  84. DTSEC_IMASK_MAGEN | \
  85. DTSEC_IMASK_TDPEEN | \
  86. DTSEC_IMASK_RDPEEN))
  87. /* dtsec timestamp event bits */
  88. #define TMR_PEMASK_TSREEN 0x00010000
  89. #define TMR_PEVENT_TSRE 0x00010000
  90. /* Group address bit indication */
  91. #define MAC_GROUP_ADDRESS 0x0000010000000000ULL
  92. /* Defaults */
  93. #define DEFAULT_HALFDUP_RETRANSMIT 0xf
  94. #define DEFAULT_HALFDUP_COLL_WINDOW 0x37
  95. #define DEFAULT_TX_PAUSE_TIME 0xf000
  96. #define DEFAULT_RX_PREPEND 0
  97. #define DEFAULT_PREAMBLE_LEN 7
  98. #define DEFAULT_TX_PAUSE_TIME_EXTD 0
  99. #define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
  100. #define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
  101. #define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
  102. #define DEFAULT_BACK_TO_BACK_IPG 0x60
  103. #define DEFAULT_MAXIMUM_FRAME 0x600
  104. /* register related defines (bits, field offsets..) */
  105. #define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
  106. #define DTSEC_ECNTRL_GMIIM 0x00000040
  107. #define DTSEC_ECNTRL_TBIM 0x00000020
  108. #define DTSEC_ECNTRL_SGMIIM 0x00000002
  109. #define DTSEC_ECNTRL_RPM 0x00000010
  110. #define DTSEC_ECNTRL_R100M 0x00000008
  111. #define DTSEC_ECNTRL_QSGMIIM 0x00000001
  112. #define DTSEC_TCTRL_GTS 0x00000020
  113. #define RCTRL_PAL_MASK 0x001f0000
  114. #define RCTRL_PAL_SHIFT 16
  115. #define RCTRL_GHTX 0x00000400
  116. #define RCTRL_GRS 0x00000020
  117. #define RCTRL_MPROM 0x00000008
  118. #define RCTRL_RSF 0x00000004
  119. #define RCTRL_UPROM 0x00000001
  120. #define MACCFG1_SOFT_RESET 0x80000000
  121. #define MACCFG1_RX_FLOW 0x00000020
  122. #define MACCFG1_TX_FLOW 0x00000010
  123. #define MACCFG1_TX_EN 0x00000001
  124. #define MACCFG1_RX_EN 0x00000004
  125. #define MACCFG2_NIBBLE_MODE 0x00000100
  126. #define MACCFG2_BYTE_MODE 0x00000200
  127. #define MACCFG2_PAD_CRC_EN 0x00000004
  128. #define MACCFG2_FULL_DUPLEX 0x00000001
  129. #define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
  130. #define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
  131. #define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
  132. #define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
  133. #define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
  134. #define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
  135. #define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
  136. #define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
  137. #define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
  138. #define HAFDUP_EXCESS_DEFER 0x00010000
  139. #define HAFDUP_COLLISION_WINDOW 0x000003ff
  140. #define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
  141. #define HAFDUP_RETRANSMISSION_MAX 0x0000f000
  142. #define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
  143. #define PTV_PTE_MASK 0xffff0000
  144. #define PTV_PT_MASK 0x0000ffff
  145. #define PTV_PTE_SHIFT 16
  146. #define MAX_PACKET_ALIGNMENT 31
  147. #define MAX_INTER_PACKET_GAP 0x7f
  148. #define MAX_RETRANSMISSION 0x0f
  149. #define MAX_COLLISION_WINDOW 0x03ff
  150. /* Hash table size (32 bits*8 regs) */
  151. #define DTSEC_HASH_TABLE_SIZE 256
  152. /* Extended Hash table size (32 bits*16 regs) */
  153. #define EXTENDED_HASH_TABLE_SIZE 512
  154. /* dTSEC Memory Map registers */
/* dTSEC Memory Map registers.
 *
 * This mirrors the hardware register layout exactly (big-endian, accessed
 * via ioread32be/iowrite32be); fields must not be reordered or resized.
 * Offsets in the comments are relative to the dTSEC block base.
 */
struct dtsec_regs {
	/* dTSEC General Control and Status Registers */
	u32 tsec_id;		/* 0x000 ETSEC_ID register */
	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
	u32 ievent;		/* 0x008 Interrupt event register */
	u32 imask;		/* 0x00C Interrupt mask register */
	u32 reserved0010[1];
	u32 ecntrl;		/* 0x014 E control register */
	u32 ptv;		/* 0x018 Pause time value register */
	u32 tbipa;		/* 0x01C TBI PHY address register */
	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
	u32 tmr_pemask;		/* 0x028 Timer event mask register */
	u32 reserved002c[5];
	u32 tctrl;		/* 0x040 Transmit control register */
	u32 reserved0044[3];
	u32 rctrl;		/* 0x050 Receive control register */
	u32 reserved0054[11];
	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
	u32 reserved00c0[16];
	u32 maccfg1;		/* 0x100 MAC configuration #1 */
	u32 maccfg2;		/* 0x104 MAC configuration #2 */
	u32 ipgifg;		/* 0x108 IPG/IFG */
	u32 hafdup;		/* 0x10C Half-duplex */
	u32 maxfrm;		/* 0x110 Maximum frame */
	u32 reserved0114[10];
	u32 ifstat;		/* 0x13C Interface status */
	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
	struct {
		u32 exact_match1;	/* octets 1-4 */
		u32 exact_match2;	/* octets 5-6 */
	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
	u32 reserved01c0[16];
	/* MIB statistics counters (Tx and Rx, per frame-size bucket) */
	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
	u32 trmgv;
	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
	u32 rbyt;	/* 0x21C receive byte counter */
	u32 rpkt;	/* 0x220 receive packet counter */
	u32 rfcs;	/* 0x224 receive FCS error counter */
	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
	u32 rbca;	/* 0x22C Rx broadcast packet counter */
	u32 rxcf;	/* 0x230 Rx control frame packet counter */
	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
	u32 raln;	/* 0x23C Rx alignment error counter */
	u32 rflr;	/* 0x240 Rx frame length error counter */
	u32 rcde;	/* 0x244 Rx code error counter */
	u32 rcse;	/* 0x248 Rx carrier sense error counter */
	u32 rund;	/* 0x24C Rx undersize packet counter */
	u32 rovr;	/* 0x250 Rx oversize packet counter */
	u32 rfrg;	/* 0x254 Rx fragments counter */
	u32 rjbr;	/* 0x258 Rx jabber counter */
	u32 rdrp;	/* 0x25C Rx drop */
	u32 tbyt;	/* 0x260 Tx byte counter */
	u32 tpkt;	/* 0x264 Tx packet counter */
	u32 tmca;	/* 0x268 Tx multicast packet counter */
	u32 tbca;	/* 0x26C Tx broadcast packet counter */
	u32 txpf;	/* 0x270 Tx pause control frame counter */
	u32 tdfr;	/* 0x274 Tx deferral packet counter */
	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
	u32 tscl;	/* 0x27C Tx single collision packet counter */
	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
	u32 tlcl;	/* 0x284 Tx late collision packet counter */
	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
	u32 tncl;	/* 0x28C Tx total collision counter */
	u32 reserved0290[1];
	u32 tdrp;	/* 0x294 Tx drop frame counter */
	u32 tjbr;	/* 0x298 Tx jabber frame counter */
	u32 tfcs;	/* 0x29C Tx FCS error counter */
	u32 txcf;	/* 0x2A0 Tx control frame counter */
	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
	u32 tund;	/* 0x2A8 Tx undersize frame counter */
	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
	u32 car1;	/* 0x2B0 carry register one register* */
	u32 car2;	/* 0x2B4 carry register two register* */
	u32 cam1;	/* 0x2B8 carry register one mask register */
	u32 cam2;	/* 0x2BC carry register two mask register */
	/* NOTE: the dTSEC6 errata workaround in the ISR reads the
	 * undocumented register at offset 0x32C as reserved02c0[27].
	 */
	u32 reserved02c0[848];
};
  241. /* struct dtsec_cfg - dTSEC configuration
  242. * Transmit half-duplex flow control, under software control for 10/100-Mbps
  243. * half-duplex media. If set, back pressure is applied to media by raising
  244. * carrier.
  245. * halfdup_retransmit:
  246. * Number of retransmission attempts following a collision.
  247. * If this is exceeded dTSEC aborts transmission due to excessive collisions.
  248. * The standard specifies the attempt limit to be 15.
  249. * halfdup_coll_window:
  250. * The number of bytes of the frame during which collisions may occur.
  251. * The default value of 55 corresponds to the frame byte at the end of the
  252. * standard 512-bit slot time window. If collisions are detected after this
  253. * byte, the late collision event is asserted and transmission of current
  254. * frame is aborted.
  255. * tx_pad_crc:
  256. * Pad and append CRC. If set, the MAC pads all transmitted short frames and
  257. * appends a CRC to every frame regardless of padding requirement.
  258. * tx_pause_time:
  259. * Transmit pause time value. This pause value is used as part of the pause
  260. * frame to be sent when a transmit pause frame is initiated.
  261. * If set to 0 this disables transmission of pause frames.
  262. * preamble_len:
  263. * Length, in bytes, of the preamble field preceding each Ethernet
  264. * start-of-frame delimiter byte. The default value of 0x7 should be used in
  265. * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
  266. * rx_prepend:
  267. * Packet alignment padding length. The specified number of bytes (1-31)
  268. * of zero padding are inserted before the start of each received frame.
  269. * For Ethernet, where optional preamble extraction is enabled, the padding
  270. * appears before the preamble, otherwise the padding precedes the
  271. * layer 2 header.
  272. *
  273. * This structure contains basic dTSEC configuration and must be passed to
  274. * init() function. A default set of configuration values can be
  275. * obtained by calling set_dflts().
  276. */
/* Basic dTSEC configuration; see the block comment above for full field
 * semantics. Filled with defaults by set_dflts(), validated by
 * check_init_parameters() and consumed (then freed) by init().
 */
struct dtsec_cfg {
	u16 halfdup_retransmit;		/* collision retransmit limit (max 15) */
	u16 halfdup_coll_window;	/* collision window length, in bytes */
	bool tx_pad_crc;		/* pad short frames, always append CRC */
	u16 tx_pause_time;		/* pause value for Tx pause frames; 0 disables */
	bool ptp_tsu_en;		/* enable the time-stamp unit */
	bool ptp_exception_en;		/* interrupt on timestamp events (TMR_PEMASK) */
	u32 preamble_len;		/* preamble length in bytes (default 7) */
	u32 rx_prepend;			/* Rx alignment padding, 0-31 bytes */
	u16 tx_pause_time_extd;		/* extended pause time (PTV upper half) */
	u16 maximum_frame;		/* maximum frame length, written to MAXFRM */
	u32 non_back_to_back_ipg1;	/* non-back-to-back inter-packet gap part 1 */
	u32 non_back_to_back_ipg2;	/* non-back-to-back inter-packet gap part 2 */
	u32 min_ifg_enforcement;	/* minimum inter-frame gap enforcement */
	u32 back_to_back_ipg;		/* back-to-back inter-packet gap */
};
/* Driver state for one dTSEC MAC instance */
struct fman_mac {
	/* pointer to dTSEC memory mapped registers */
	struct dtsec_regs __iomem *regs;
	/* MAC address of device */
	u64 addr;
	/* Ethernet physical interface */
	phy_interface_t phy_if;
	u16 max_speed;			/* link speed cap; must be < 10G for this driver */
	void *dev_id; /* device cookie used by the exception cbs */
	fman_mac_exception_cb *exception_cb;	/* error-condition callback */
	fman_mac_exception_cb *event_cb;	/* non-error event callback */
	/* Number of individual addresses in registers for this station */
	u8 num_of_ind_addr_in_regs;
	/* pointer to driver's global address hash table */
	struct eth_hash_t *multicast_addr_hash;
	/* pointer to driver's individual address hash table */
	struct eth_hash_t *unicast_addr_hash;
	u8 mac_id;			/* MAC index, passed to fman_reset_mac() */
	u32 exceptions;			/* NOTE(review): presumably enabled IMASK bits — confirm */
	bool ptp_tsu_enabled;		/* time-stamp unit enabled */
	bool en_tsu_err_exception;	/* raise exception on TSU errors */
	/* Config snapshot used by init(); NULL here is what
	 * is_init_done() keys off (freed once init completes).
	 */
	struct dtsec_cfg *dtsec_drv_param;
	void *fm;			/* owning FMan instance (opaque cookie) */
	struct fman_rev_info fm_rev_info;	/* FMan silicon revision (errata gating) */
	bool basex_if;			/* assumes 1000Base-X vs SGMII TBI mode — TODO confirm */
	struct phy_device *tbiphy;	/* internal TBI PHY device */
};
  320. static void set_dflts(struct dtsec_cfg *cfg)
  321. {
  322. cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
  323. cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
  324. cfg->tx_pad_crc = true;
  325. cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
  326. /* PHY address 0 is reserved (DPAA RM) */
  327. cfg->rx_prepend = DEFAULT_RX_PREPEND;
  328. cfg->ptp_tsu_en = true;
  329. cfg->ptp_exception_en = true;
  330. cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
  331. cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
  332. cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
  333. cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
  334. cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
  335. cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
  336. cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
  337. }
/* Bring the dTSEC hardware out of reset and program it from @cfg.
 *
 * @regs:           mapped dTSEC register block
 * @cfg:            validated configuration (see check_init_parameters())
 * @iface:          PHY interface mode (RGMII/SGMII/QSGMII/GMII/MII/RMII)
 * @iface_speed:    link speed (SPEED_100 / SPEED_1000 matter here)
 * @macaddr:        6-byte station MAC address
 * @exception_mask: initial IMASK value
 * @tbi_addr:       PHY address to program into TBIPA
 *
 * Returns 0 on success, -EINVAL if @iface is not supported by this
 * silicon (per the ID2 capability bits). The write sequence is
 * order-sensitive: soft reset first, then ECNTRL, then the remaining
 * configuration registers.
 */
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
		phy_interface_t iface, u16 iface_speed, u8 *macaddr,
		u32 exception_mask, u8 tbi_addr)
{
	bool is_rgmii, is_sgmii, is_qsgmii;
	int i;
	u32 tmp;

	/* Soft reset: assert then deassert MACCFG1[SOFT_RESET] */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/* dtsec_id2: capability bits for this instance */
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support: reduced-pin interfaces need the
	 * "reduced" capability bit to be clear
	 */
	if (iface == PHY_INTERFACE_MODE_RGMII ||
	    iface == PHY_INTERFACE_MODE_RGMII_ID ||
	    iface == PHY_INTERFACE_MODE_RGMII_RXID ||
	    iface == PHY_INTERFACE_MODE_RGMII_TXID ||
	    iface == PHY_INTERFACE_MODE_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	if (iface == PHY_INTERFACE_MODE_SGMII ||
	    iface == PHY_INTERFACE_MODE_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
		   iface == PHY_INTERFACE_MODE_RGMII_ID ||
		   iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		   iface == PHY_INTERFACE_MODE_RGMII_TXID;
	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

	/* ECNTRL: select the MAC-PHY interface mode and speed */
	tmp = 0;
	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;
	iowrite32be(tmp, &regs->ecntrl);

	/* PTV: pause time (low half) and extended pause time (high half) */
	tmp = 0;
	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
	iowrite32be(tmp, &regs->ptv);

	/* RCTRL: Rx alignment padding */
	tmp = 0;
	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
	/* Accept short frames */
	tmp |= RCTRL_RSF;
	iowrite32be(tmp, &regs->rctrl);

	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(tbi_addr, &regs->tbipa);

	/* Time-stamp unit: disabled unless requested */
	iowrite32be(0, &regs->tmr_ctrl);

	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	/* MACCFG1: enable Rx/Tx flow control (Rx/Tx themselves stay off) */
	tmp = 0;
	tmp |= MACCFG1_RX_FLOW;
	tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	/* MACCFG2: nibble mode below 1G, byte mode at 1G */
	tmp = 0;
	if (iface_speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;

	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
		MACCFG2_PREAMBLE_LENGTH_MASK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	/* IPGIFG: pack the four gap fields into one register */
	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
	       | ((cfg->non_back_to_back_ipg2 <<
		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		  & IPGIFG_NON_BACK_TO_BACK_IPG_2)
	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		  & IPGIFG_MIN_IFG_ENFORCEMENT)
	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	/* HAFDUP: half-duplex collision parameters */
	tmp = 0;
	tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
	iowrite32be(tmp, &regs->hafdup);

	/* Initialize Maximum frame length */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	/* Mask all MIB carry interrupts */
	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	iowrite32be(exception_mask, &regs->imask);

	/* Clear any stale events before enabling interrupts */
	iowrite32be(0xffffffff, &regs->ievent);

	/* Station address: octets are stored byte-reversed in hardware */
	tmp = (u32)((macaddr[5] << 24) |
		    (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
	iowrite32be(tmp, &regs->macstnaddr1);

	tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
	iowrite32be(tmp, &regs->macstnaddr2);

	/* HASH: clear both hash-filter register banks */
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	return 0;
}
  460. static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
  461. {
  462. u32 tmp;
  463. tmp = (u32)((adr[5] << 24) |
  464. (adr[4] << 16) | (adr[3] << 8) | adr[2]);
  465. iowrite32be(tmp, &regs->macstnaddr1);
  466. tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
  467. iowrite32be(tmp, &regs->macstnaddr2);
  468. }
  469. static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
  470. bool enable)
  471. {
  472. int reg_idx = (bucket >> 5) & 0xf;
  473. int bit_idx = bucket & 0x1f;
  474. u32 bit_mask = 0x80000000 >> bit_idx;
  475. u32 __iomem *reg;
  476. if (reg_idx > 7)
  477. reg = &regs->gaddr[reg_idx - 8];
  478. else
  479. reg = &regs->igaddr[reg_idx];
  480. if (enable)
  481. iowrite32be(ioread32be(reg) | bit_mask, reg);
  482. else
  483. iowrite32be(ioread32be(reg) & (~bit_mask), reg);
  484. }
  485. static int check_init_parameters(struct fman_mac *dtsec)
  486. {
  487. if (dtsec->max_speed >= SPEED_10000) {
  488. pr_err("1G MAC driver supports 1G or lower speeds\n");
  489. return -EINVAL;
  490. }
  491. if (dtsec->addr == 0) {
  492. pr_err("Ethernet MAC Must have a valid MAC Address\n");
  493. return -EINVAL;
  494. }
  495. if ((dtsec->dtsec_drv_param)->rx_prepend >
  496. MAX_PACKET_ALIGNMENT) {
  497. pr_err("packetAlignmentPadding can't be > than %d\n",
  498. MAX_PACKET_ALIGNMENT);
  499. return -EINVAL;
  500. }
  501. if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
  502. MAX_INTER_PACKET_GAP) ||
  503. ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
  504. MAX_INTER_PACKET_GAP) ||
  505. ((dtsec->dtsec_drv_param)->back_to_back_ipg >
  506. MAX_INTER_PACKET_GAP)) {
  507. pr_err("Inter packet gap can't be greater than %d\n",
  508. MAX_INTER_PACKET_GAP);
  509. return -EINVAL;
  510. }
  511. if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
  512. MAX_RETRANSMISSION) {
  513. pr_err("maxRetransmission can't be greater than %d\n",
  514. MAX_RETRANSMISSION);
  515. return -EINVAL;
  516. }
  517. if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
  518. MAX_COLLISION_WINDOW) {
  519. pr_err("collisionWindow can't be greater than %d\n",
  520. MAX_COLLISION_WINDOW);
  521. return -EINVAL;
  522. /* If Auto negotiation process is disabled, need to set up the PHY
  523. * using the MII Management Interface
  524. */
  525. }
  526. if (!dtsec->exception_cb) {
  527. pr_err("uninitialized exception_cb\n");
  528. return -EINVAL;
  529. }
  530. if (!dtsec->event_cb) {
  531. pr_err("uninitialized event_cb\n");
  532. return -EINVAL;
  533. }
  534. return 0;
  535. }
  536. static int get_exception_flag(enum fman_mac_exceptions exception)
  537. {
  538. u32 bit_mask;
  539. switch (exception) {
  540. case FM_MAC_EX_1G_BAB_RX:
  541. bit_mask = DTSEC_IMASK_BREN;
  542. break;
  543. case FM_MAC_EX_1G_RX_CTL:
  544. bit_mask = DTSEC_IMASK_RXCEN;
  545. break;
  546. case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
  547. bit_mask = DTSEC_IMASK_GTSCEN;
  548. break;
  549. case FM_MAC_EX_1G_BAB_TX:
  550. bit_mask = DTSEC_IMASK_BTEN;
  551. break;
  552. case FM_MAC_EX_1G_TX_CTL:
  553. bit_mask = DTSEC_IMASK_TXCEN;
  554. break;
  555. case FM_MAC_EX_1G_TX_ERR:
  556. bit_mask = DTSEC_IMASK_TXEEN;
  557. break;
  558. case FM_MAC_EX_1G_LATE_COL:
  559. bit_mask = DTSEC_IMASK_LCEN;
  560. break;
  561. case FM_MAC_EX_1G_COL_RET_LMT:
  562. bit_mask = DTSEC_IMASK_CRLEN;
  563. break;
  564. case FM_MAC_EX_1G_TX_FIFO_UNDRN:
  565. bit_mask = DTSEC_IMASK_XFUNEN;
  566. break;
  567. case FM_MAC_EX_1G_MAG_PCKT:
  568. bit_mask = DTSEC_IMASK_MAGEN;
  569. break;
  570. case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
  571. bit_mask = DTSEC_IMASK_MMRDEN;
  572. break;
  573. case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
  574. bit_mask = DTSEC_IMASK_MMWREN;
  575. break;
  576. case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
  577. bit_mask = DTSEC_IMASK_GRSCEN;
  578. break;
  579. case FM_MAC_EX_1G_DATA_ERR:
  580. bit_mask = DTSEC_IMASK_TDPEEN;
  581. break;
  582. case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
  583. bit_mask = DTSEC_IMASK_MSROEN;
  584. break;
  585. default:
  586. bit_mask = 0;
  587. break;
  588. }
  589. return bit_mask;
  590. }
  591. static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
  592. {
  593. /* Checks if dTSEC driver parameters were initialized */
  594. if (!dtsec_drv_params)
  595. return true;
  596. return false;
  597. }
  598. static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
  599. {
  600. struct dtsec_regs __iomem *regs = dtsec->regs;
  601. if (is_init_done(dtsec->dtsec_drv_param))
  602. return 0;
  603. return (u16)ioread32be(&regs->maxfrm);
  604. }
/* dTSEC error/event interrupt handler (registered with the FPM as this
 * MAC's error interrupt).
 *
 * Reads IEVENT, drops MDIO events and anything masked in IMASK, acks
 * the remainder, then invokes the client exception callback once per
 * pending event bit.  On a Tx FIFO underrun with FMan rev 2 it also
 * runs the FM_TX_LOCKUP_ERRATA_DTSEC6 detection/recovery sequence.
 */
static void dtsec_isr(void *handle)
{
	struct fman_mac *dtsec = (struct fman_mac *)handle;
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 event;

	/* do not handle MDIO events */
	event = ioread32be(&regs->ievent) &
		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));

	/* keep only the events currently enabled in IMASK */
	event &= ioread32be(&regs->imask);

	/* ack the events we are about to handle */
	iowrite32be(event, &regs->ievent);

	if (event & DTSEC_IMASK_BREN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
	if (event & DTSEC_IMASK_RXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
	if (event & DTSEC_IMASK_GTSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
	if (event & DTSEC_IMASK_BTEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
	if (event & DTSEC_IMASK_TXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
	if (event & DTSEC_IMASK_TXEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
	if (event & DTSEC_IMASK_LCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
	if (event & DTSEC_IMASK_CRLEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
	if (event & DTSEC_IMASK_XFUNEN) {
		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
		if (dtsec->fm_rev_info.major == 2) {
			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
			/* a. Write 0x00E0_0C00 to DTSEC_ID
			 * This is a read only register
			 * b. Read and save the value of TPKT
			 */
			tpkt1 = ioread32be(&regs->tpkt);

			/* c. Read the register at dTSEC address offset 0x32C */
			tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

			/* d. Compare bits [9:15] to bits [25:31] of the
			 * register at address offset 0x32C.
			 */
			/* NOTE(review): the two masks select different bit
			 * positions but neither side is shifted, so the
			 * fields only compare equal when both are zero —
			 * verify against the errata document.
			 */
			if ((tmp_reg1 & 0x007F0000) !=
			    (tmp_reg1 & 0x0000007F)) {
				/* If they are not equal, save the value of
				 * this register and wait for at least
				 * MAXFRM*16 ns
				 */
				/* NOTE(review): min(x, 1) clamps the sleep
				 * to at most ~1 us; max() looks like the
				 * intent — confirm the MAXFRM*16 ns wait.
				 */
				usleep_range((u32)(min
					(dtsec_get_max_frame_length(dtsec) *
					16 / 1000, 1)), (u32)
					(min(dtsec_get_max_frame_length
					(dtsec) * 16 / 1000, 1) + 1));
			}

			/* e. Read and save TPKT again and read the register
			 * at dTSEC address offset 0x32C again
			 */
			tpkt2 = ioread32be(&regs->tpkt);
			tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

			/* f. Compare the value of TPKT saved in step b to
			 * value read in step e. Also compare bits [9:15] of
			 * the register at offset 0x32C saved in step d to the
			 * value of bits [9:15] saved in step e. If the two
			 * registers values are unchanged, then the transmit
			 * portion of the dTSEC controller is locked up and
			 * the user should proceed to the recover sequence.
			 */
			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
			    (tmp_reg2 & 0x007F0000))) {
				/* recover sequence */

				/* a.Write a 1 to RCTRL[GRS] */
				iowrite32be(ioread32be(&regs->rctrl) |
					    RCTRL_GRS, &regs->rctrl);

				/* b.Wait until IEVENT[GRSC]=1, or at least
				 * 100 us has elapsed.
				 */
				for (i = 0; i < 100; i++) {
					if (ioread32be(&regs->ievent) &
					    DTSEC_IMASK_GRSCEN)
						break;
					udelay(1);
				}
				if (ioread32be(&regs->ievent) &
				    DTSEC_IMASK_GRSCEN)
					iowrite32be(DTSEC_IMASK_GRSCEN,
						    &regs->ievent);
				else
					pr_debug("Rx lockup due to Tx lockup\n");

				/* c.Write a 1 to bit n of FM_RSTC
				 * (offset 0x0CC of FPM)
				 */
				fman_reset_mac(dtsec->fm, dtsec->mac_id);

				/* d.Wait 4 Tx clocks (32 ns) */
				udelay(1);

				/* e.Write a 0 to bit n of FM_RSTC. */
				/* cleared by FMAN
				 */
			}
		}

		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
	}
	if (event & DTSEC_IMASK_MAGEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
	if (event & DTSEC_IMASK_GRSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
	if (event & DTSEC_IMASK_TDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
	if (event & DTSEC_IMASK_RDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);

	/* masked interrupts */
	WARN_ON(event & DTSEC_IMASK_ABRTEN);
	WARN_ON(event & DTSEC_IMASK_IFERREN);
}
  718. static void dtsec_1588_isr(void *handle)
  719. {
  720. struct fman_mac *dtsec = (struct fman_mac *)handle;
  721. struct dtsec_regs __iomem *regs = dtsec->regs;
  722. u32 event;
  723. if (dtsec->ptp_tsu_enabled) {
  724. event = ioread32be(&regs->tmr_pevent);
  725. event &= ioread32be(&regs->tmr_pemask);
  726. if (event) {
  727. iowrite32be(event, &regs->tmr_pevent);
  728. WARN_ON(event & TMR_PEVENT_TSRE);
  729. dtsec->exception_cb(dtsec->dev_id,
  730. FM_MAC_EX_1G_1588_TS_RX_ERR);
  731. }
  732. }
  733. }
  734. static void free_init_resources(struct fman_mac *dtsec)
  735. {
  736. fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  737. FMAN_INTR_TYPE_ERR);
  738. fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
  739. FMAN_INTR_TYPE_NORMAL);
  740. /* release the driver's group hash table */
  741. free_hash_table(dtsec->multicast_addr_hash);
  742. dtsec->multicast_addr_hash = NULL;
  743. /* release the driver's individual hash table */
  744. free_hash_table(dtsec->unicast_addr_hash);
  745. dtsec->unicast_addr_hash = NULL;
  746. }
  747. int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
  748. {
  749. if (is_init_done(dtsec->dtsec_drv_param))
  750. return -EINVAL;
  751. dtsec->dtsec_drv_param->maximum_frame = new_val;
  752. return 0;
  753. }
  754. int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
  755. {
  756. if (is_init_done(dtsec->dtsec_drv_param))
  757. return -EINVAL;
  758. dtsec->dtsec_drv_param->tx_pad_crc = new_val;
  759. return 0;
  760. }
  761. int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
  762. {
  763. struct dtsec_regs __iomem *regs = dtsec->regs;
  764. u32 tmp;
  765. if (!is_init_done(dtsec->dtsec_drv_param))
  766. return -EINVAL;
  767. /* Enable */
  768. tmp = ioread32be(&regs->maccfg1);
  769. if (mode & COMM_MODE_RX)
  770. tmp |= MACCFG1_RX_EN;
  771. if (mode & COMM_MODE_TX)
  772. tmp |= MACCFG1_TX_EN;
  773. iowrite32be(tmp, &regs->maccfg1);
  774. /* Graceful start - clear the graceful receive stop bit */
  775. if (mode & COMM_MODE_TX)
  776. iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
  777. &regs->tctrl);
  778. if (mode & COMM_MODE_RX)
  779. iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
  780. &regs->rctrl);
  781. return 0;
  782. }
  783. int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
  784. {
  785. struct dtsec_regs __iomem *regs = dtsec->regs;
  786. u32 tmp;
  787. if (!is_init_done(dtsec->dtsec_drv_param))
  788. return -EINVAL;
  789. /* Gracefull stop - Assert the graceful transmit stop bit */
  790. if (mode & COMM_MODE_RX) {
  791. tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
  792. iowrite32be(tmp, &regs->rctrl);
  793. if (dtsec->fm_rev_info.major == 2)
  794. usleep_range(100, 200);
  795. else
  796. udelay(10);
  797. }
  798. if (mode & COMM_MODE_TX) {
  799. if (dtsec->fm_rev_info.major == 2)
  800. pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
  801. else
  802. pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
  803. }
  804. tmp = ioread32be(&regs->maccfg1);
  805. if (mode & COMM_MODE_RX)
  806. tmp &= ~MACCFG1_RX_EN;
  807. if (mode & COMM_MODE_TX)
  808. tmp &= ~MACCFG1_TX_EN;
  809. iowrite32be(tmp, &regs->maccfg1);
  810. return 0;
  811. }
  812. int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
  813. u8 __maybe_unused priority,
  814. u16 pause_time, u16 __maybe_unused thresh_time)
  815. {
  816. struct dtsec_regs __iomem *regs = dtsec->regs;
  817. u32 ptv = 0;
  818. if (!is_init_done(dtsec->dtsec_drv_param))
  819. return -EINVAL;
  820. if (pause_time) {
  821. /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
  822. if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
  823. pr_warn("pause-time: %d illegal.Should be > 320\n",
  824. pause_time);
  825. return -EINVAL;
  826. }
  827. ptv = ioread32be(&regs->ptv);
  828. ptv &= PTV_PTE_MASK;
  829. ptv |= pause_time & PTV_PT_MASK;
  830. iowrite32be(ptv, &regs->ptv);
  831. /* trigger the transmission of a flow-control pause frame */
  832. iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
  833. &regs->maccfg1);
  834. } else
  835. iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
  836. &regs->maccfg1);
  837. return 0;
  838. }
  839. int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
  840. {
  841. struct dtsec_regs __iomem *regs = dtsec->regs;
  842. u32 tmp;
  843. if (!is_init_done(dtsec->dtsec_drv_param))
  844. return -EINVAL;
  845. tmp = ioread32be(&regs->maccfg1);
  846. if (en)
  847. tmp |= MACCFG1_RX_FLOW;
  848. else
  849. tmp &= ~MACCFG1_RX_FLOW;
  850. iowrite32be(tmp, &regs->maccfg1);
  851. return 0;
  852. }
  853. int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
  854. {
  855. if (!is_init_done(dtsec->dtsec_drv_param))
  856. return -EINVAL;
  857. /* Initialize MAC Station Address registers (1 & 2)
  858. * Station address have to be swapped (big endian to little endian
  859. */
  860. dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
  861. set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
  862. return 0;
  863. }
  864. int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  865. {
  866. struct dtsec_regs __iomem *regs = dtsec->regs;
  867. struct eth_hash_entry *hash_entry;
  868. u64 addr;
  869. s32 bucket;
  870. u32 crc = 0xFFFFFFFF;
  871. bool mcast, ghtx;
  872. if (!is_init_done(dtsec->dtsec_drv_param))
  873. return -EINVAL;
  874. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  875. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  876. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  877. /* Cannot handle unicast mac addr when GHTX is on */
  878. if (ghtx && !mcast) {
  879. pr_err("Could not compute hash bucket\n");
  880. return -EINVAL;
  881. }
  882. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  883. crc = bitrev32(crc);
  884. /* considering the 9 highest order bits in crc H[8:0]:
  885. *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
  886. *and H[5:1] (next 5 bits) identify the hash bit
  887. *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
  888. *and H[4:0] (next 5 bits) identify the hash bit.
  889. *
  890. *In bucket index output the low 5 bits identify the hash register
  891. *bit, while the higher 4 bits identify the hash register
  892. */
  893. if (ghtx) {
  894. bucket = (s32)((crc >> 23) & 0x1ff);
  895. } else {
  896. bucket = (s32)((crc >> 24) & 0xff);
  897. /* if !ghtx and mcast the bit must be set in gaddr instead of
  898. *igaddr.
  899. */
  900. if (mcast)
  901. bucket += 0x100;
  902. }
  903. set_bucket(dtsec->regs, bucket, true);
  904. /* Create element to be added to the driver hash table */
  905. hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
  906. if (!hash_entry)
  907. return -ENOMEM;
  908. hash_entry->addr = addr;
  909. INIT_LIST_HEAD(&hash_entry->node);
  910. if (addr & MAC_GROUP_ADDRESS)
  911. /* Group Address */
  912. list_add_tail(&hash_entry->node,
  913. &dtsec->multicast_addr_hash->lsts[bucket]);
  914. else
  915. list_add_tail(&hash_entry->node,
  916. &dtsec->unicast_addr_hash->lsts[bucket]);
  917. return 0;
  918. }
/* Remove @eth_addr from the driver's shadow hash table and, when its
 * bucket list becomes empty, clear the matching hardware hash bit.
 *
 * Bucket selection mirrors dtsec_add_hash_mac_address(): bit-reversed
 * CRC32 of the address, top 8 or 9 bits depending on RCTRL[GHTX].
 * Returns 0 even if the address was not present (a WARN is emitted),
 * -EINVAL before init or for a unicast address while GHTX is on.
 */
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct list_head *pos;
	struct eth_hash_entry *hash_entry = NULL;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}

	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set
		 * in gaddr instead of igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	if (addr & MAC_GROUP_ADDRESS) {
		/* Group Address */
		list_for_each(pos,
			      &dtsec->multicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		/* last entry gone: disable the hardware bucket too */
		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	} else {
		/* Individual Address */
		list_for_each(pos,
			      &dtsec->unicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	}

	/* address does not exist */
	/* NOTE(review): this WARN only fires when the bucket list was
	 * empty; if the list is non-empty but holds no match, hash_entry
	 * still points at the last visited entry and the miss is silent.
	 */
	WARN_ON(!hash_entry);

	return 0;
}
  981. int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
  982. {
  983. struct dtsec_regs __iomem *regs = dtsec->regs;
  984. u32 tmp;
  985. if (!is_init_done(dtsec->dtsec_drv_param))
  986. return -EINVAL;
  987. /* Set unicast promiscuous */
  988. tmp = ioread32be(&regs->rctrl);
  989. if (new_val)
  990. tmp |= RCTRL_UPROM;
  991. else
  992. tmp &= ~RCTRL_UPROM;
  993. iowrite32be(tmp, &regs->rctrl);
  994. /* Set multicast promiscuous */
  995. tmp = ioread32be(&regs->rctrl);
  996. if (new_val)
  997. tmp |= RCTRL_MPROM;
  998. else
  999. tmp &= ~RCTRL_MPROM;
  1000. iowrite32be(tmp, &regs->rctrl);
  1001. return 0;
  1002. }
  1003. int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
  1004. {
  1005. struct dtsec_regs __iomem *regs = dtsec->regs;
  1006. u32 tmp;
  1007. if (!is_init_done(dtsec->dtsec_drv_param))
  1008. return -EINVAL;
  1009. tmp = ioread32be(&regs->maccfg2);
  1010. /* Full Duplex */
  1011. tmp |= MACCFG2_FULL_DUPLEX;
  1012. tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
  1013. if (speed < SPEED_1000)
  1014. tmp |= MACCFG2_NIBBLE_MODE;
  1015. else if (speed == SPEED_1000)
  1016. tmp |= MACCFG2_BYTE_MODE;
  1017. iowrite32be(tmp, &regs->maccfg2);
  1018. tmp = ioread32be(&regs->ecntrl);
  1019. if (speed == SPEED_100)
  1020. tmp |= DTSEC_ECNTRL_R100M;
  1021. else
  1022. tmp &= ~DTSEC_ECNTRL_R100M;
  1023. iowrite32be(tmp, &regs->ecntrl);
  1024. return 0;
  1025. }
  1026. int dtsec_restart_autoneg(struct fman_mac *dtsec)
  1027. {
  1028. u16 tmp_reg16;
  1029. if (!is_init_done(dtsec->dtsec_drv_param))
  1030. return -EINVAL;
  1031. tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
  1032. tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
  1033. tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
  1034. BMCR_FULLDPLX | BMCR_SPEED1000);
  1035. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1036. return 0;
  1037. }
  1038. int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
  1039. {
  1040. struct dtsec_regs __iomem *regs = dtsec->regs;
  1041. if (!is_init_done(dtsec->dtsec_drv_param))
  1042. return -EINVAL;
  1043. *mac_version = ioread32be(&regs->tsec_id);
  1044. return 0;
  1045. }
  1046. int dtsec_set_exception(struct fman_mac *dtsec,
  1047. enum fman_mac_exceptions exception, bool enable)
  1048. {
  1049. struct dtsec_regs __iomem *regs = dtsec->regs;
  1050. u32 bit_mask = 0;
  1051. if (!is_init_done(dtsec->dtsec_drv_param))
  1052. return -EINVAL;
  1053. if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
  1054. bit_mask = get_exception_flag(exception);
  1055. if (bit_mask) {
  1056. if (enable)
  1057. dtsec->exceptions |= bit_mask;
  1058. else
  1059. dtsec->exceptions &= ~bit_mask;
  1060. } else {
  1061. pr_err("Undefined exception\n");
  1062. return -EINVAL;
  1063. }
  1064. if (enable)
  1065. iowrite32be(ioread32be(&regs->imask) | bit_mask,
  1066. &regs->imask);
  1067. else
  1068. iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
  1069. &regs->imask);
  1070. } else {
  1071. if (!dtsec->ptp_tsu_enabled) {
  1072. pr_err("Exception valid for 1588 only\n");
  1073. return -EINVAL;
  1074. }
  1075. switch (exception) {
  1076. case FM_MAC_EX_1G_1588_TS_RX_ERR:
  1077. if (enable) {
  1078. dtsec->en_tsu_err_exception = true;
  1079. iowrite32be(ioread32be(&regs->tmr_pemask) |
  1080. TMR_PEMASK_TSREEN,
  1081. &regs->tmr_pemask);
  1082. } else {
  1083. dtsec->en_tsu_err_exception = false;
  1084. iowrite32be(ioread32be(&regs->tmr_pemask) &
  1085. ~TMR_PEMASK_TSREEN,
  1086. &regs->tmr_pemask);
  1087. }
  1088. break;
  1089. default:
  1090. pr_err("Undefined exception\n");
  1091. return -EINVAL;
  1092. }
  1093. }
  1094. return 0;
  1095. }
/* Complete MAC initialization after configuration.
 *
 * Optionally resets the MAC, validates parameters, programs the
 * registers from the staged dtsec_drv_param, brings up the internal
 * TBI PHY for SGMII, propagates the max frame length to the FMan,
 * allocates the driver hash tables and hooks the error/1588 interrupt
 * handlers.  On success the staged config is freed — presumably this
 * is what flips is_init_done(), matching the guard below (confirm).
 * Returns 0 on success or a negative errno.
 */
int dtsec_init(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct dtsec_cfg *dtsec_drv_param;
	int err;
	u16 max_frm_ln;
	enet_addr_t eth_addr;

	/* init may only run once */
	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (DEFAULT_RESET_ON_INIT &&
	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
		pr_err("Can't reset MAC!\n");
		return -EINVAL;
	}

	err = check_init_parameters(dtsec);
	if (err)
		return err;

	dtsec_drv_param = dtsec->dtsec_drv_param;

	MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);

	/* program the hardware from the staged configuration */
	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
		   dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
		   dtsec->tbiphy->mdio.addr);
	if (err) {
		free_init_resources(dtsec);
		pr_err("DTSEC version doesn't support this i/f mode\n");
		return err;
	}

	if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
		u16 tmp_reg16;

		/* Configure the TBI PHY Control Register */
		tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		/* release the soft reset, keep the clock selection */
		tmp_reg16 = TBICON_CLK_SELECT;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		/* reset the PHY with AN, full duplex, 1000 Mb/s */
		tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

		/* advertise 1000BASE-X or SGMII per the configured i/f */
		if (dtsec->basex_if)
			tmp_reg16 = TBIANA_1000X;
		else
			tmp_reg16 = TBIANA_SGMII;
		phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

		/* restart autonegotiation */
		tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
	}

	/* Max Frame Length: tell the FMan what init() programmed */
	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
	if (err) {
		pr_err("Setting max frame length failed\n");
		free_init_resources(dtsec);
		return -EINVAL;
	}

	dtsec->multicast_addr_hash =
	alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
	if (!dtsec->multicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("MC hash table is failed\n");
		return -ENOMEM;
	}

	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
	if (!dtsec->unicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("UC hash table is failed\n");
		return -ENOMEM;
	}

	/* register err intr handler for dtsec to FPM (err) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
	/* register 1588 intr handler for TMR to FPM (normal) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);

	/* drop the staged configuration */
	kfree(dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;

	return 0;
}
  1173. int dtsec_free(struct fman_mac *dtsec)
  1174. {
  1175. free_init_resources(dtsec);
  1176. kfree(dtsec->dtsec_drv_param);
  1177. dtsec->dtsec_drv_param = NULL;
  1178. kfree(dtsec);
  1179. return 0;
  1180. }
/* Allocate and configure a dTSEC MAC instance from @params.
 *
 * Builds the fman_mac object, stages a default dtsec_cfg (tunable via
 * the dtsec_cfg_* calls until dtsec_init() runs), resolves the internal
 * TBI PHY from the device tree and records the FMan revision.
 * Returns the new instance, or NULL on any failure.
 */
struct fman_mac *dtsec_config(struct fman_mac_params *params)
{
	struct fman_mac *dtsec;
	struct dtsec_cfg *dtsec_drv_param;
	void __iomem *base_addr;

	base_addr = params->base_addr;

	/* allocate memory for the UCC GETH data structure. */
	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
	if (!dtsec)
		return NULL;

	/* allocate memory for the d_tsec driver parameters data structure. */
	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
	if (!dtsec_drv_param)
		goto err_dtsec;

	/* Plant parameter structure pointer */
	dtsec->dtsec_drv_param = dtsec_drv_param;

	set_dflts(dtsec_drv_param);

	dtsec->regs = base_addr;
	dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
	dtsec->max_speed = params->max_speed;
	dtsec->phy_if = params->phy_if;
	dtsec->mac_id = params->mac_id;
	/* default set of exceptions reported through the callback */
	dtsec->exceptions = (DTSEC_IMASK_BREN |
			     DTSEC_IMASK_RXCEN |
			     DTSEC_IMASK_BTEN |
			     DTSEC_IMASK_TXCEN |
			     DTSEC_IMASK_TXEEN |
			     DTSEC_IMASK_ABRTEN |
			     DTSEC_IMASK_LCEN |
			     DTSEC_IMASK_CRLEN |
			     DTSEC_IMASK_XFUNEN |
			     DTSEC_IMASK_IFERREN |
			     DTSEC_IMASK_MAGEN |
			     DTSEC_IMASK_TDPEEN |
			     DTSEC_IMASK_RDPEEN);
	dtsec->exception_cb = params->exception_cb;
	dtsec->event_cb = params->event_cb;
	dtsec->dev_id = params->dev_id;
	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
	dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;

	dtsec->fm = params->fm;
	dtsec->basex_if = params->basex_if;

	if (!params->internal_phy_node) {
		pr_err("TBI PHY node is not available\n");
		goto err_dtsec_drv_param;
	}

	dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
	if (!dtsec->tbiphy) {
		pr_err("of_phy_find_device (TBI PHY) failed\n");
		goto err_dtsec_drv_param;
	}

	/* NOTE(review): the device reference taken by
	 * of_phy_find_device() is dropped immediately; presumably the
	 * PHY device outlives this MAC via the device tree — confirm.
	 */
	put_device(&dtsec->tbiphy->mdio.dev);

	/* Save FMan revision */
	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

	return dtsec;

err_dtsec_drv_param:
	kfree(dtsec_drv_param);
err_dtsec:
	kfree(dtsec);
	return NULL;
}