ftgmac100.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919
  1. /*
  2. * Faraday FTGMAC100 Gigabit Ethernet
  3. *
  4. * (C) Copyright 2009-2011 Faraday Technology
  5. * Po-Yu Chuang <ratbert@faraday-tech.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  20. */
  21. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22. #include <linux/dma-mapping.h>
  23. #include <linux/etherdevice.h>
  24. #include <linux/ethtool.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/netdevice.h>
  29. #include <linux/of.h>
  30. #include <linux/phy.h>
  31. #include <linux/platform_device.h>
  32. #include <linux/property.h>
  33. #include <linux/crc32.h>
  34. #include <linux/if_vlan.h>
  35. #include <linux/of_net.h>
  36. #include <net/ip.h>
  37. #include <net/ncsi.h>
  38. #include "ftgmac100.h"
  39. #define DRV_NAME "ftgmac100"
  40. #define DRV_VERSION "0.7"
  41. /* Arbitrary values, I am not sure the HW has limits */
  42. #define MAX_RX_QUEUE_ENTRIES 1024
  43. #define MAX_TX_QUEUE_ENTRIES 1024
  44. #define MIN_RX_QUEUE_ENTRIES 32
  45. #define MIN_TX_QUEUE_ENTRIES 32
  46. /* Defaults */
  47. #define DEF_RX_QUEUE_ENTRIES 128
  48. #define DEF_TX_QUEUE_ENTRIES 128
  49. #define MAX_PKT_SIZE 1536
  50. #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */
  51. /* Min number of tx ring entries before stopping queue */
  52. #define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
/* Driver-private state for one FTGMAC100 MAC instance.
 *
 * Holds the MMIO mapping, the RX/TX DMA descriptor rings with their
 * shadow skb arrays, current link parameters, and the filter/pause
 * settings that must be re-applied after a MAC reset.
 */
struct ftgmac100 {
	/* Registers */
	struct resource *res;
	void __iomem *base;

	/* Rx ring */
	unsigned int rx_q_entries;	/* ring size; indices wrap by masking, so assumed power of two */
	struct ftgmac100_rxdes *rxdes;	/* descriptor ring */
	dma_addr_t rxdes_dma;		/* bus address of rxdes */
	struct sk_buff **rx_skbs;	/* skb backing each descriptor (NULL if alloc failed) */
	unsigned int rx_pointer;	/* next descriptor to examine */
	u32 rxdes0_edorr_mask;		/* end-of-ring bit (chip variant dependent) */

	/* Tx ring */
	unsigned int tx_q_entries;
	struct ftgmac100_txdes *txdes;
	dma_addr_t txdes_dma;
	struct sk_buff **tx_skbs;
	unsigned int tx_clean_pointer;	/* next descriptor to reclaim */
	unsigned int tx_pointer;	/* next free descriptor */
	u32 txdes0_edotr_mask;		/* end-of-ring bit (chip variant dependent) */

	/* Used to signal the reset task of ring change request */
	unsigned int new_rx_q_entries;
	unsigned int new_tx_q_entries;

	/* Scratch page to use when rx skb alloc fails */
	void *rx_scratch;
	dma_addr_t rx_scratch_dma;

	/* Component structures */
	struct net_device *netdev;
	struct device *dev;
	struct ncsi_dev *ndev;
	struct napi_struct napi;
	struct work_struct reset_task;
	struct mii_bus *mii_bus;

	/* Link management */
	int cur_speed;
	int cur_duplex;
	bool use_ncsi;

	/* Multicast filter settings */
	u32 maht0;
	u32 maht1;

	/* Flow control settings */
	bool tx_pause;
	bool rx_pause;
	bool aneg_pause;

	/* Misc */
	bool need_mac_restart;
	bool is_aspeed;
};
  100. static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
  101. {
  102. struct net_device *netdev = priv->netdev;
  103. int i;
  104. /* NOTE: reset clears all registers */
  105. iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
  106. iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
  107. priv->base + FTGMAC100_OFFSET_MACCR);
  108. for (i = 0; i < 200; i++) {
  109. unsigned int maccr;
  110. maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
  111. if (!(maccr & FTGMAC100_MACCR_SW_RST))
  112. return 0;
  113. udelay(1);
  114. }
  115. netdev_err(netdev, "Hardware reset failed\n");
  116. return -EIO;
  117. }
/* Reset the MAC and program the MACCR speed bits from the current
 * link state (priv->cur_speed). Also rewinds all ring pointers since
 * a reset invalidates any in-flight descriptors.
 *
 * Returns 0 on success or -EIO if either reset times out.
 */
static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
	u32 maccr = 0;

	switch (priv->cur_speed) {
	case SPEED_10:
	case 0: /* no link */
		break;

	case SPEED_100:
		maccr |= FTGMAC100_MACCR_FAST_MODE;
		break;

	case SPEED_1000:
		maccr |= FTGMAC100_MACCR_GIGA_MODE;
		break;
	default:
		netdev_err(priv->netdev, "Unknown speed %d !\n",
			   priv->cur_speed);
		break;
	}

	/* (Re)initialize the queue pointers */
	priv->rx_pointer = 0;
	priv->tx_clean_pointer = 0;
	priv->tx_pointer = 0;

	/* The doc says reset twice with 10us interval */
	if (ftgmac100_reset_mac(priv, maccr))
		return -EIO;
	usleep_range(10, 1000);
	return ftgmac100_reset_mac(priv, maccr);
}
  146. static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
  147. {
  148. unsigned int maddr = mac[0] << 8 | mac[1];
  149. unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
  150. iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
  151. iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
  152. }
/* Establish the netdev MAC address at probe time.
 *
 * Preference order: address from device properties (device tree),
 * then whatever address is already latched in the chip registers
 * (e.g. programmed by the bootloader), and finally a random address.
 */
static void ftgmac100_initial_mac(struct ftgmac100 *priv)
{
	u8 mac[ETH_ALEN];
	unsigned int m;
	unsigned int l;
	void *addr;

	addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
	if (addr) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from device tree\n",
			 mac);
		return;
	}

	/* Recover the address currently programmed into the MAC
	 * (reverse of ftgmac100_write_mac_addr()'s packing)
	 */
	m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
	l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
	mac[0] = (m >> 8) & 0xff;
	mac[1] = m & 0xff;
	mac[2] = (l >> 24) & 0xff;
	mac[3] = (l >> 16) & 0xff;
	mac[4] = (l >> 8) & 0xff;
	mac[5] = l & 0xff;

	if (is_valid_ether_addr(mac)) {
		ether_addr_copy(priv->netdev->dev_addr, mac);
		dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
	} else {
		eth_hw_addr_random(priv->netdev);
		dev_info(priv->dev, "Generated random MAC address %pM\n",
			 priv->netdev->dev_addr);
	}
}
  183. static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
  184. {
  185. int ret;
  186. ret = eth_prepare_mac_addr_change(dev, p);
  187. if (ret < 0)
  188. return ret;
  189. eth_commit_mac_addr_change(dev, p);
  190. ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
  191. return 0;
  192. }
  193. static void ftgmac100_config_pause(struct ftgmac100 *priv)
  194. {
  195. u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
  196. /* Throttle tx queue when receiving pause frames */
  197. if (priv->rx_pause)
  198. fcr |= FTGMAC100_FCR_FC_EN;
  199. /* Enables sending pause frames when the RX queue is past a
  200. * certain threshold.
  201. */
  202. if (priv->tx_pause)
  203. fcr |= FTGMAC100_FCR_FCTHR_EN;
  204. iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
  205. }
/* Program all non-MACCR hardware state: ring base addresses, RX buffer
 * size, descriptor autopolling, MAC address, multicast hash, DMA
 * burst/threshold parameters, interrupt mitigation and FIFO sizes.
 *
 * Called after a MAC reset (which clears all registers), before
 * ftgmac100_start_hw() enables the DMA engines.
 */
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
	u32 reg, rfifo_sz, tfifo_sz;

	/* Clear stale interrupts (ISR is write-1-to-clear) */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);

	/* Setup RX ring buffer base */
	iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);

	/* Setup TX ring buffer base */
	iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);

	/* Configure RX buffer size */
	iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
		  priv->base + FTGMAC100_OFFSET_RBSR);

	/* Set RX descriptor autopoll */
	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
		  priv->base + FTGMAC100_OFFSET_APTC);

	/* Write MAC address */
	ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);

	/* Write multicast filter */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Configure descriptor sizes and increase burst sizes according
	 * to values in Aspeed SDK. The FIFO arbitration is enabled and
	 * the thresholds set based on the recommended values in the
	 * AST2400 specification.
	 */
	iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
		  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
		  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
		  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
		  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
		  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
		  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
		  priv->base + FTGMAC100_OFFSET_DBLAC);

	/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
	 * mitigation doesn't seem to provide any benefit with NAPI so leave
	 * it at that.
	 */
	iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
		  FTGMAC100_ITC_TXINT_THR(1),
		  priv->base + FTGMAC100_OFFSET_ITC);

	/* Configure FIFO sizes in the TPAFCR register: FEAR reports the
	 * fitted RX/TX FIFO sizes, mirror them into TPAFCR bits 24-29.
	 */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
	rfifo_sz = reg & 0x00000007;
	tfifo_sz = (reg >> 3) & 0x00000007;
	reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
	reg &= ~0x3f000000;
	reg |= (tfifo_sz << 27);
	reg |= (rfifo_sz << 24);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
}
/* Build and write MACCR: keep the speed bits programmed at reset time,
 * turn on the DMA engines and MAC, and apply duplex, promiscuous,
 * multicast and VLAN-stripping options from the current netdev state.
 *
 * Also used by ftgmac100_set_rx_mode() to refresh the filtering bits
 * while the interface is running.
 */
static void ftgmac100_start_hw(struct ftgmac100 *priv)
{
	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);

	/* Keep the original GMAC and FAST bits */
	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);

	/* Add all the main enable bits */
	maccr |= FTGMAC100_MACCR_TXDMA_EN |
		 FTGMAC100_MACCR_RXDMA_EN |
		 FTGMAC100_MACCR_TXMAC_EN |
		 FTGMAC100_MACCR_RXMAC_EN |
		 FTGMAC100_MACCR_CRC_APD |
		 FTGMAC100_MACCR_PHY_LINK_LEVEL |
		 FTGMAC100_MACCR_RX_RUNT |
		 FTGMAC100_MACCR_RX_BROADPKT;

	/* Add other bits as needed */
	if (priv->cur_duplex == DUPLEX_FULL)
		maccr |= FTGMAC100_MACCR_FULLDUP;
	if (priv->netdev->flags & IFF_PROMISC)
		maccr |= FTGMAC100_MACCR_RX_ALL;
	if (priv->netdev->flags & IFF_ALLMULTI)
		maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
	else if (netdev_mc_count(priv->netdev))
		maccr |= FTGMAC100_MACCR_HT_MULTI_EN;

	/* Vlan filtering enabled */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		maccr |= FTGMAC100_MACCR_RM_VLAN;

	/* Hit the HW */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
/* Disable the MAC entirely by clearing every MACCR enable bit */
static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
  290. static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
  291. {
  292. struct netdev_hw_addr *ha;
  293. priv->maht1 = 0;
  294. priv->maht0 = 0;
  295. netdev_for_each_mc_addr(ha, priv->netdev) {
  296. u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
  297. crc_val = (~(crc_val >> 2)) & 0x3f;
  298. if (crc_val >= 32)
  299. priv->maht1 |= 1ul << (crc_val - 32);
  300. else
  301. priv->maht0 |= 1ul << (crc_val);
  302. }
  303. }
/* ndo_set_rx_mode callback: recompute the multicast hash filter and,
 * if the interface is running, push it to the hardware along with the
 * MACCR filtering bits (promisc/allmulti) via ftgmac100_start_hw().
 */
static void ftgmac100_set_rx_mode(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Setup the hash filter */
	ftgmac100_calc_mc_hash(priv);

	/* Interface down ? that's all there is to do */
	if (!netif_running(netdev))
		return;

	/* Update the HW */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Reconfigure MACCR */
	ftgmac100_start_hw(priv);
}
/* (Re)populate one RX descriptor with a freshly allocated skb.
 *
 * On allocation or DMA-mapping failure the descriptor is pointed at
 * the scratch page instead, so the hardware always has a valid buffer
 * to DMA into; priv->rx_skbs[entry] is left NULL so the RX path knows
 * the received data must be dropped. Returns 0 or -ENOMEM.
 */
static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb;
	dma_addr_t map;
	int err = 0;

	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_warn(netdev, "failed to allocate rx skb\n");
		err = -ENOMEM;
		map = priv->rx_scratch_dma;
	} else {
		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, map))) {
			if (net_ratelimit())
				netdev_err(netdev, "failed to map rx page\n");
			dev_kfree_skb_any(skb);
			map = priv->rx_scratch_dma;
			skb = NULL;
			err = -ENOMEM;
		}
	}

	/* Store skb */
	priv->rx_skbs[entry] = skb;

	/* Store DMA address into RX desc */
	rxdes->rxdes3 = cpu_to_le32(map);

	/* Ensure the above is ordered vs clearing the OWN bit */
	dma_wmb();

	/* Clean status (which resets own bit); the last ring entry must
	 * keep the end-of-ring flag set
	 */
	if (entry == (priv->rx_q_entries - 1))
		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
	else
		rxdes->rxdes0 = 0;

	return err;
}
  356. static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
  357. unsigned int pointer)
  358. {
  359. return (pointer + 1) & (priv->rx_q_entries - 1);
  360. }
  361. static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
  362. {
  363. struct net_device *netdev = priv->netdev;
  364. if (status & FTGMAC100_RXDES0_RX_ERR)
  365. netdev->stats.rx_errors++;
  366. if (status & FTGMAC100_RXDES0_CRC_ERR)
  367. netdev->stats.rx_crc_errors++;
  368. if (status & (FTGMAC100_RXDES0_FTL |
  369. FTGMAC100_RXDES0_RUNT |
  370. FTGMAC100_RXDES0_RX_ODD_NB))
  371. netdev->stats.rx_length_errors++;
  372. }
  373. static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
  374. {
  375. struct net_device *netdev = priv->netdev;
  376. struct ftgmac100_rxdes *rxdes;
  377. struct sk_buff *skb;
  378. unsigned int pointer, size;
  379. u32 status, csum_vlan;
  380. dma_addr_t map;
  381. /* Grab next RX descriptor */
  382. pointer = priv->rx_pointer;
  383. rxdes = &priv->rxdes[pointer];
  384. /* Grab descriptor status */
  385. status = le32_to_cpu(rxdes->rxdes0);
  386. /* Do we have a packet ? */
  387. if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
  388. return false;
  389. /* Order subsequent reads with the test for the ready bit */
  390. dma_rmb();
  391. /* We don't cope with fragmented RX packets */
  392. if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
  393. !(status & FTGMAC100_RXDES0_LRS)))
  394. goto drop;
  395. /* Grab received size and csum vlan field in the descriptor */
  396. size = status & FTGMAC100_RXDES0_VDBC;
  397. csum_vlan = le32_to_cpu(rxdes->rxdes1);
  398. /* Any error (other than csum offload) flagged ? */
  399. if (unlikely(status & RXDES0_ANY_ERROR)) {
  400. /* Correct for incorrect flagging of runt packets
  401. * with vlan tags... Just accept a runt packet that
  402. * has been flagged as vlan and whose size is at
  403. * least 60 bytes.
  404. */
  405. if ((status & FTGMAC100_RXDES0_RUNT) &&
  406. (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
  407. (size >= 60))
  408. status &= ~FTGMAC100_RXDES0_RUNT;
  409. /* Any error still in there ? */
  410. if (status & RXDES0_ANY_ERROR) {
  411. ftgmac100_rx_packet_error(priv, status);
  412. goto drop;
  413. }
  414. }
  415. /* If the packet had no skb (failed to allocate earlier)
  416. * then try to allocate one and skip
  417. */
  418. skb = priv->rx_skbs[pointer];
  419. if (!unlikely(skb)) {
  420. ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
  421. goto drop;
  422. }
  423. if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
  424. netdev->stats.multicast++;
  425. /* If the HW found checksum errors, bounce it to software.
  426. *
  427. * If we didn't, we need to see if the packet was recognized
  428. * by HW as one of the supported checksummed protocols before
  429. * we accept the HW test results.
  430. */
  431. if (netdev->features & NETIF_F_RXCSUM) {
  432. u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
  433. FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
  434. FTGMAC100_RXDES1_IP_CHKSUM_ERR;
  435. if ((csum_vlan & err_bits) ||
  436. !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
  437. skb->ip_summed = CHECKSUM_NONE;
  438. else
  439. skb->ip_summed = CHECKSUM_UNNECESSARY;
  440. }
  441. /* Transfer received size to skb */
  442. skb_put(skb, size);
  443. /* Extract vlan tag */
  444. if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  445. (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
  446. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  447. csum_vlan & 0xffff);
  448. /* Tear down DMA mapping, do necessary cache management */
  449. map = le32_to_cpu(rxdes->rxdes3);
  450. #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
  451. /* When we don't have an iommu, we can save cycles by not
  452. * invalidating the cache for the part of the packet that
  453. * wasn't received.
  454. */
  455. dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
  456. #else
  457. dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
  458. #endif
  459. /* Resplenish rx ring */
  460. ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
  461. priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
  462. skb->protocol = eth_type_trans(skb, netdev);
  463. netdev->stats.rx_packets++;
  464. netdev->stats.rx_bytes += size;
  465. /* push packet to protocol stack */
  466. if (skb->ip_summed == CHECKSUM_NONE)
  467. netif_receive_skb(skb);
  468. else
  469. napi_gro_receive(&priv->napi, skb);
  470. (*processed)++;
  471. return true;
  472. drop:
  473. /* Clean rxdes0 (which resets own bit) */
  474. rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
  475. priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
  476. netdev->stats.rx_dropped++;
  477. return true;
  478. }
  479. static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
  480. unsigned int index)
  481. {
  482. if (index == (priv->tx_q_entries - 1))
  483. return priv->txdes0_edotr_mask;
  484. else
  485. return 0;
  486. }
  487. static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
  488. unsigned int pointer)
  489. {
  490. return (pointer + 1) & (priv->tx_q_entries - 1);
  491. }
  492. static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
  493. {
  494. /* Returns the number of available slots in the TX queue
  495. *
  496. * This always leaves one free slot so we don't have to
  497. * worry about empty vs. full, and this simplifies the
  498. * test for ftgmac100_tx_buf_cleanable() below
  499. */
  500. return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
  501. (priv->tx_q_entries - 1);
  502. }
  503. static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
  504. {
  505. return priv->tx_pointer != priv->tx_clean_pointer;
  506. }
/* Unmap one TX descriptor's buffer and drop our reference to the skb.
 *
 * The head segment (FTS set) was mapped from the skb linear area with
 * dma_map_single(); later segments are page fragments mapped with
 * skb_frag_dma_map(), so each kind needs the matching unmap call.
 * The skb itself is only freed on the last segment (LTS set).
 */
static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
				     unsigned int pointer,
				     struct sk_buff *skb,
				     struct ftgmac100_txdes *txdes,
				     u32 ctl_stat)
{
	dma_addr_t map = le32_to_cpu(txdes->txdes3);
	size_t len;

	if (ctl_stat & FTGMAC100_TXDES0_FTS) {
		len = skb_headlen(skb);
		dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
	} else {
		len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
		dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
	}

	/* Free SKB on last segment */
	if (ctl_stat & FTGMAC100_TXDES0_LTS)
		dev_kfree_skb(skb);
	priv->tx_skbs[pointer] = NULL;
}
/* Try to reclaim the TX descriptor at tx_clean_pointer.
 *
 * Returns false when the descriptor is still owned by the DMA engine
 * (nothing more to clean), true after one descriptor was reclaimed,
 * its stats accounted and the clean pointer advanced.
 */
static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_txdes *txdes;
	struct sk_buff *skb;
	unsigned int pointer;
	u32 ctl_stat;

	pointer = priv->tx_clean_pointer;
	txdes = &priv->txdes[pointer];

	ctl_stat = le32_to_cpu(txdes->txdes0);
	if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
		return false;

	skb = priv->tx_skbs[pointer];
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;
	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
	/* Clear the descriptor, preserving only the end-of-ring bit */
	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);

	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);

	return true;
}
/* Reclaim all completed TX descriptors and restart the queue if it
 * had been stopped for lack of ring space.
 *
 * The wake path re-checks the stop condition under the TX queue lock
 * so that it is race free vs. a concurrent stop in
 * ftgmac100_hard_start_xmit().
 */
static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	/* Process all completed packets */
	while (ftgmac100_tx_buf_cleanable(priv) &&
	       ftgmac100_tx_complete_packet(priv))
		;

	/* Restart queue if needed */
	smp_mb();
	if (unlikely(netif_queue_stopped(netdev) &&
		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(netdev, 0);
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-test under the lock before waking */
		if (netif_queue_stopped(netdev) &&
		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
		__netif_tx_unlock(txq);
	}
}
  567. static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
  568. {
  569. if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
  570. u8 ip_proto = ip_hdr(skb)->protocol;
  571. *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
  572. switch(ip_proto) {
  573. case IPPROTO_TCP:
  574. *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
  575. return true;
  576. case IPPROTO_UDP:
  577. *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
  578. return true;
  579. case IPPROTO_IP:
  580. return true;
  581. }
  582. }
  583. return skb_checksum_help(skb) == 0;
  584. }
/* ndo_start_xmit: queue one packet on the TX ring.
 *
 * The skb head and every page fragment each get their own descriptor.
 * The OWN bit of the *first* descriptor is written last (after a
 * dma_wmb()) so the DMA engine never sees a half-built chain. On a
 * fragment mapping failure everything already mapped is unwound and
 * the packet dropped; NETDEV_TX_OK is still returned because the skb
 * has been consumed either way.
 */
static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct ftgmac100_txdes *txdes, *first;
	unsigned int pointer, nfrags, len, i, j;
	u32 f_ctl_stat, ctl_stat, csum_vlan;
	dma_addr_t map;

	/* The HW doesn't pad small frames */
	if (eth_skb_pad(skb)) {
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Reject oversize packets */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		if (net_ratelimit())
			netdev_dbg(netdev, "tx packet too big\n");
		goto drop;
	}

	/* Do we have a limit on #fragments ? I yet have to get a reply
	 * from Aspeed. If there's one I haven't hit it.
	 */
	nfrags = skb_shinfo(skb)->nr_frags;

	/* Get header len */
	len = skb_headlen(skb);

	/* Map the packet head */
	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, map)) {
		if (net_ratelimit())
			netdev_err(netdev, "map tx packet head failed\n");
		goto drop;
	}

	/* Grab the next free tx descriptor */
	pointer = priv->tx_pointer;
	txdes = first = &priv->txdes[pointer];

	/* Set it up with the packet head. Don't write the OWN bit to
	 * the ring just yet
	 */
	priv->tx_skbs[pointer] = skb;
	f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
	f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
	f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
	f_ctl_stat |= FTGMAC100_TXDES0_FTS;
	if (nfrags == 0)
		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
	txdes->txdes3 = cpu_to_le32(map);

	/* Setup HW checksumming */
	csum_vlan = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
		goto drop;

	/* Add VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
	}

	txdes->txdes1 = cpu_to_le32(csum_vlan);

	/* Next descriptor */
	pointer = ftgmac100_next_tx_pointer(priv, pointer);

	/* Add the fragments */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;

		/* Map it */
		map = skb_frag_dma_map(priv->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, map))
			goto dma_err;

		/* Setup descriptor */
		priv->tx_skbs[pointer] = skb;
		txdes = &priv->txdes[pointer];
		ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
		ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
		ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
		if (i == (nfrags - 1))
			ctl_stat |= FTGMAC100_TXDES0_LTS;
		txdes->txdes0 = cpu_to_le32(ctl_stat);
		txdes->txdes1 = 0;
		txdes->txdes3 = cpu_to_le32(map);

		/* Next one */
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
	}

	/* Order the previous packet and descriptor updates
	 * before setting the OWN bit on the first descriptor.
	 */
	dma_wmb();
	first->txdes0 = cpu_to_le32(f_ctl_stat);

	/* Update next TX pointer */
	priv->tx_pointer = pointer;

	/* If there isn't enough room for all the fragments of a new packet
	 * in the TX ring, stop the queue. The sequence below is race free
	 * vs. a concurrent restart in ftgmac100_poll()
	 */
	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
		netif_stop_queue(netdev);

		/* Order the queue stop with the test below */
		smp_mb();
		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
	}

	/* Poke transmitter to read the updated TX descriptors */
	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);

	return NETDEV_TX_OK;

dma_err:
	if (net_ratelimit())
		netdev_err(netdev, "map tx fragment failed\n");

	/* Free head */
	pointer = priv->tx_pointer;
	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
	first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);

	/* Then all fragments */
	for (j = 0; j < i; j++) {
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
		txdes = &priv->txdes[pointer];
		ctl_stat = le32_to_cpu(txdes->txdes0);
		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
		txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
	}

	/* This cannot be reached if we successfully mapped the
	 * last fragment, so we know ftgmac100_free_tx_packet()
	 * hasn't freed the skb yet.
	 */
drop:
	/* Drop the packet */
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
  713. static void ftgmac100_free_buffers(struct ftgmac100 *priv)
  714. {
  715. int i;
  716. /* Free all RX buffers */
  717. for (i = 0; i < priv->rx_q_entries; i++) {
  718. struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
  719. struct sk_buff *skb = priv->rx_skbs[i];
  720. dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
  721. if (!skb)
  722. continue;
  723. priv->rx_skbs[i] = NULL;
  724. dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
  725. dev_kfree_skb_any(skb);
  726. }
  727. /* Free all TX buffers */
  728. for (i = 0; i < priv->tx_q_entries; i++) {
  729. struct ftgmac100_txdes *txdes = &priv->txdes[i];
  730. struct sk_buff *skb = priv->tx_skbs[i];
  731. if (!skb)
  732. continue;
  733. ftgmac100_free_tx_packet(priv, i, skb, txdes,
  734. le32_to_cpu(txdes->txdes0));
  735. }
  736. }
  737. static void ftgmac100_free_rings(struct ftgmac100 *priv)
  738. {
  739. /* Free skb arrays */
  740. kfree(priv->rx_skbs);
  741. kfree(priv->tx_skbs);
  742. /* Free descriptors */
  743. if (priv->rxdes)
  744. dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
  745. sizeof(struct ftgmac100_rxdes),
  746. priv->rxdes, priv->rxdes_dma);
  747. priv->rxdes = NULL;
  748. if (priv->txdes)
  749. dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
  750. sizeof(struct ftgmac100_txdes),
  751. priv->txdes, priv->txdes_dma);
  752. priv->txdes = NULL;
  753. /* Free scratch packet buffer */
  754. if (priv->rx_scratch)
  755. dma_free_coherent(priv->dev, RX_BUF_SIZE,
  756. priv->rx_scratch, priv->rx_scratch_dma);
  757. }
/* Allocate the skb pointer arrays, both DMA-coherent descriptor rings
 * (sized for the maximum supported queue depths so the rings never need
 * reallocation on a ringparam change) and the RX scratch buffer.
 *
 * Returns 0 or -ENOMEM. On failure, partial allocations are left in
 * place for the caller to release via ftgmac100_free_rings().
 */
static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
	/* Allocate skb arrays */
	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->rx_skbs)
		return -ENOMEM;
	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->tx_skbs)
		return -ENOMEM;

	/* Allocate descriptors (zeroed so all slots start unowned) */
	priv->rxdes = dma_zalloc_coherent(priv->dev,
					  MAX_RX_QUEUE_ENTRIES *
					  sizeof(struct ftgmac100_rxdes),
					  &priv->rxdes_dma, GFP_KERNEL);
	if (!priv->rxdes)
		return -ENOMEM;
	priv->txdes = dma_zalloc_coherent(priv->dev,
					  MAX_TX_QUEUE_ENTRIES *
					  sizeof(struct ftgmac100_txdes),
					  &priv->txdes_dma, GFP_KERNEL);
	if (!priv->txdes)
		return -ENOMEM;

	/* Allocate scratch packet buffer */
	priv->rx_scratch = dma_alloc_coherent(priv->dev,
					      RX_BUF_SIZE,
					      &priv->rx_scratch_dma,
					      GFP_KERNEL);
	if (!priv->rx_scratch)
		return -ENOMEM;

	return 0;
}
  791. static void ftgmac100_init_rings(struct ftgmac100 *priv)
  792. {
  793. struct ftgmac100_rxdes *rxdes = NULL;
  794. struct ftgmac100_txdes *txdes = NULL;
  795. int i;
  796. /* Update entries counts */
  797. priv->rx_q_entries = priv->new_rx_q_entries;
  798. priv->tx_q_entries = priv->new_tx_q_entries;
  799. if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
  800. return;
  801. /* Initialize RX ring */
  802. for (i = 0; i < priv->rx_q_entries; i++) {
  803. rxdes = &priv->rxdes[i];
  804. rxdes->rxdes0 = 0;
  805. rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
  806. }
  807. /* Mark the end of the ring */
  808. rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
  809. if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES))
  810. return;
  811. /* Initialize TX ring */
  812. for (i = 0; i < priv->tx_q_entries; i++) {
  813. txdes = &priv->txdes[i];
  814. txdes->txdes0 = 0;
  815. }
  816. txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
  817. }
/* Attach a freshly allocated receive buffer to every RX descriptor.
 * Returns 0, or -ENOMEM on the first allocation failure (already
 * installed buffers are left for the caller to clean up).
 */
static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
{
	int i;

	for (i = 0; i < priv->rx_q_entries; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];

		if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
			return -ENOMEM;
	}
	return 0;
}
/* phylib link-change callback: cache the new speed/duplex/pause state
 * and, when the link is (re)established, schedule the reset task to
 * reprogram the MAC for the new parameters.
 */
static void ftgmac100_adjust_link(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	bool tx_pause, rx_pause;
	int new_speed;

	/* We store "no link" as speed 0 */
	if (!phydev->link)
		new_speed = 0;
	else
		new_speed = phydev->speed;

	/* Grab pause settings from PHY if configured to do so */
	if (priv->aneg_pause) {
		rx_pause = tx_pause = phydev->pause;
		if (phydev->asym_pause)
			tx_pause = !rx_pause;
	} else {
		rx_pause = priv->rx_pause;
		tx_pause = priv->tx_pause;
	}

	/* Link hasn't changed, do nothing.
	 * NOTE(review): this compares phydev->speed rather than new_speed,
	 * so detecting a link drop relies on phylib changing the reported
	 * speed/duplex on link loss — confirm against phylib behaviour.
	 */
	if (phydev->speed == priv->cur_speed &&
	    phydev->duplex == priv->cur_duplex &&
	    rx_pause == priv->rx_pause &&
	    tx_pause == priv->tx_pause)
		return;

	/* Print status if we have a link or we had one and just lost it,
	 * don't print otherwise.
	 */
	if (new_speed || priv->cur_speed)
		phy_print_status(phydev);

	priv->cur_speed = new_speed;
	priv->cur_duplex = phydev->duplex;
	priv->rx_pause = rx_pause;
	priv->tx_pause = tx_pause;

	/* Link is down, do nothing else */
	if (!new_speed)
		return;

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Reset the adapter asynchronously */
	schedule_work(&priv->reset_task);
}
/* Locate the first PHY on our MDIO bus and connect it to the netdev,
 * advertising PAUSE capabilities. Returns 0 or a negative errno.
 */
static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
{
	struct net_device *netdev = priv->netdev;
	struct phy_device *phydev;

	phydev = phy_find_first(priv->mii_bus);
	if (!phydev) {
		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
		return -ENODEV;
	}

	/* Attach with our link-change callback, using the interface
	 * mode taken from the device-tree (or the RGMII default)
	 */
	phydev = phy_connect(netdev, phydev_name(phydev),
			     &ftgmac100_adjust_link, intf);
	if (IS_ERR(phydev)) {
		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
		return PTR_ERR(phydev);
	}

	/* Indicate that we support PAUSE frames (see comment in
	 * Documentation/networking/phy.txt)
	 */
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->advertising = phydev->supported;

	/* Display what we found */
	phy_attached_info(phydev);

	return 0;
}
  895. static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
  896. {
  897. struct net_device *netdev = bus->priv;
  898. struct ftgmac100 *priv = netdev_priv(netdev);
  899. unsigned int phycr;
  900. int i;
  901. phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
  902. /* preserve MDC cycle threshold */
  903. phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
  904. phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
  905. FTGMAC100_PHYCR_REGAD(regnum) |
  906. FTGMAC100_PHYCR_MIIRD;
  907. iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
  908. for (i = 0; i < 10; i++) {
  909. phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
  910. if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
  911. int data;
  912. data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
  913. return FTGMAC100_PHYDATA_MIIRDATA(data);
  914. }
  915. udelay(100);
  916. }
  917. netdev_err(netdev, "mdio read timed out\n");
  918. return -EIO;
  919. }
  920. static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
  921. int regnum, u16 value)
  922. {
  923. struct net_device *netdev = bus->priv;
  924. struct ftgmac100 *priv = netdev_priv(netdev);
  925. unsigned int phycr;
  926. int data;
  927. int i;
  928. phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
  929. /* preserve MDC cycle threshold */
  930. phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
  931. phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
  932. FTGMAC100_PHYCR_REGAD(regnum) |
  933. FTGMAC100_PHYCR_MIIWR;
  934. data = FTGMAC100_PHYDATA_MIIWDATA(value);
  935. iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
  936. iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
  937. for (i = 0; i < 10; i++) {
  938. phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
  939. if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
  940. return 0;
  941. udelay(100);
  942. }
  943. netdev_err(netdev, "mdio write timed out\n");
  944. return -EIO;
  945. }
/* ethtool -i: report driver name, version and the device's bus name */
static void ftgmac100_get_drvinfo(struct net_device *netdev,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
/* ethtool -g: report current and maximum RX/TX ring sizes */
static void ftgmac100_get_ringparam(struct net_device *netdev,
				    struct ethtool_ringparam *ering)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Clear the mini/jumbo fields we don't support */
	memset(ering, 0, sizeof(*ering));
	ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
	ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
	ering->rx_pending = priv->rx_q_entries;
	ering->tx_pending = priv->tx_q_entries;
}
  963. static int ftgmac100_set_ringparam(struct net_device *netdev,
  964. struct ethtool_ringparam *ering)
  965. {
  966. struct ftgmac100 *priv = netdev_priv(netdev);
  967. if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
  968. ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
  969. ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
  970. ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
  971. !is_power_of_2(ering->rx_pending) ||
  972. !is_power_of_2(ering->tx_pending))
  973. return -EINVAL;
  974. priv->new_rx_q_entries = ering->rx_pending;
  975. priv->new_tx_q_entries = ering->tx_pending;
  976. if (netif_running(netdev))
  977. schedule_work(&priv->reset_task);
  978. return 0;
  979. }
/* ethtool -a: report the cached pause/flow-control configuration */
static void ftgmac100_get_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	pause->autoneg = priv->aneg_pause;
	pause->tx_pause = priv->tx_pause;
	pause->rx_pause = priv->rx_pause;
}
/* ethtool -A: record the requested pause configuration, update the
 * PHY advertisement accordingly, then either restart autonegotiation
 * (PHY present and pause-autoneg enabled) or program the MAC's flow
 * control directly.
 */
static int ftgmac100_set_pauseparam(struct net_device *netdev,
				    struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	priv->aneg_pause = pause->autoneg;
	priv->tx_pause = pause->tx_pause;
	priv->rx_pause = pause->rx_pause;

	if (phydev) {
		/* Map rx/tx requests onto Pause/Asym_Pause bits:
		 * rx sets both, then tx toggles Asym_Pause — so
		 * rx+tx ends up advertising Pause only, and tx-only
		 * advertises Asym_Pause only.
		 */
		phydev->advertising &= ~ADVERTISED_Pause;
		phydev->advertising &= ~ADVERTISED_Asym_Pause;
		if (pause->rx_pause) {
			phydev->advertising |= ADVERTISED_Pause;
			phydev->advertising |= ADVERTISED_Asym_Pause;
		}
		if (pause->tx_pause)
			phydev->advertising ^= ADVERTISED_Asym_Pause;
	}
	if (netif_running(netdev)) {
		if (phydev && priv->aneg_pause)
			phy_start_aneg(phydev);
		else
			ftgmac100_config_pause(priv);
	}

	return 0;
}
/* ethtool entry points; link settings and nway reset are delegated
 * to the generic phylib helpers.
 */
static const struct ethtool_ops ftgmac100_ethtool_ops = {
	.get_drvinfo		= ftgmac100_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_ringparam		= ftgmac100_get_ringparam,
	.set_ringparam		= ftgmac100_set_ringparam,
	.get_pauseparam		= ftgmac100_get_pauseparam,
	.set_pauseparam		= ftgmac100_set_pauseparam,
};
/* Hard interrupt handler: acknowledge the raised causes, account the
 * abnormal ones, then mask RX/TX interrupts and hand processing to
 * NAPI. An AHB bus error short-circuits into an asynchronous chip
 * reset instead.
 */
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int status, new_mask = FTGMAC100_INT_BAD;

	/* Fetch and clear interrupt bits, process abnormal ones */
	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
	if (unlikely(status & FTGMAC100_INT_BAD)) {

		/* RX buffer unavailable */
		if (status & FTGMAC100_INT_NO_RXBUF)
			netdev->stats.rx_over_errors++;

		/* received packet lost due to RX FIFO full */
		if (status & FTGMAC100_INT_RPKT_LOST)
			netdev->stats.rx_fifo_errors++;

		/* sent packet lost due to excessive TX collision */
		if (status & FTGMAC100_INT_XPKT_LOST)
			netdev->stats.tx_fifo_errors++;

		/* AHB error -> Reset the chip */
		if (status & FTGMAC100_INT_AHB_ERR) {
			if (net_ratelimit())
				netdev_warn(netdev,
					   "AHB bus error ! Resetting chip.\n");
			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
			schedule_work(&priv->reset_task);
			return IRQ_HANDLED;
		}

		/* We may need to restart the MAC after such errors, delay
		 * this until after we have freed some Rx buffers though
		 */
		priv->need_mac_restart = true;

		/* Disable those errors until we restart */
		new_mask &= ~status;
	}

	/* Only enable "bad" interrupts while NAPI is on */
	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);

	/* Schedule NAPI bh */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}
/* Peek at the current RX descriptor: true when the hardware has
 * marked it as holding a received packet (RXPKT_RDY).
 */
static bool ftgmac100_check_rx(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];

	/* Do we have a packet ? */
	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
}
/* NAPI poll: reap TX completions, process up to @budget RX packets,
 * restart the MAC if an earlier error interrupt requested it, and
 * carefully re-enable interrupts once no work remains (re-checking
 * the descriptors after clearing latched ISR bits to close the race
 * with the hardware).
 */
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
	int work_done = 0;
	bool more;

	/* Handle TX completions */
	if (ftgmac100_tx_buf_cleanable(priv))
		ftgmac100_tx_complete(priv);

	/* Handle RX packets */
	do {
		more = ftgmac100_rx_packet(priv, &work_done);
	} while (more && work_done < budget);


	/* The interrupt is telling us to kick the MAC back to life
	 * after an RX overflow
	 */
	if (unlikely(priv->need_mac_restart)) {
		ftgmac100_start_hw(priv);

		/* Re-enable "bad" interrupts */
		iowrite32(FTGMAC100_INT_BAD,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	/* As long as we are waiting for transmit packets to be
	 * completed we keep NAPI going
	 */
	if (ftgmac100_tx_buf_cleanable(priv))
		work_done = budget;

	if (work_done < budget) {
		/* We are about to re-enable all interrupts. However
		 * the HW has been latching RX/TX packet interrupts while
		 * they were masked. So we clear them first, then we need
		 * to re-check if there's something to process
		 */
		iowrite32(FTGMAC100_INT_RXTX,
			  priv->base + FTGMAC100_OFFSET_ISR);

		/* Push the above (and provides a barrier vs. subsequent
		 * reads of the descriptor).
		 */
		ioread32(priv->base + FTGMAC100_OFFSET_ISR);

		/* Check RX and TX descriptors for more work to do */
		if (ftgmac100_check_rx(priv) ||
		    ftgmac100_tx_buf_cleanable(priv))
			return budget;

		/* deschedule NAPI */
		napi_complete(napi);

		/* enable all interrupts */
		iowrite32(FTGMAC100_INT_ALL,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	return work_done;
}
/* Common bring-up path shared by open() and the reset task: re-init
 * the rings, refill RX buffers, reprogram and start the MAC, then
 * re-enable NAPI, the TX queue and interrupts.
 *
 * @ignore_alloc_err: when true (reset path) an RX buffer allocation
 * failure is not fatal — the MAC is restarted anyway with whatever
 * buffers could be allocated.
 */
static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_config_pause(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}
/* Deferred full chip reset (scheduled from the IRQ handler, link
 * changes, timeouts and ringparam updates). Takes the rtnl lock, the
 * PHY lock and the MDIO bus lock — in that order — to quiesce all
 * paths into the hardware, then tears the MAC down and rebuilds it.
 */
static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);


	/* Check if the interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
 bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}
/* ndo_open: allocate rings, reset the MAC, register NAPI and the IRQ,
 * start the hardware, then kick off either the PHY state machine or
 * the NC-SI device. Error paths unwind in reverse order.
 */
static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers  */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
	 */
	if (priv->use_ncsi) {
		priv->cur_duplex = DUPLEX_FULL;
		priv->cur_speed = SPEED_100;
	} else {
		priv->cur_duplex = 0;
		priv->cur_speed = 0;
	}

	/* Reset the hardware */
	err = ftgmac100_reset_and_config_mac(priv);
	if (err)
		goto err_hw;

	/* Initialize NAPI */
	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);

	/* Grab our interrupt */
	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
		goto err_irq;
	}

	/* Start things up */
	err = ftgmac100_init_all(priv, false);
	if (err) {
		netdev_err(netdev, "Failed to allocate packet buffers\n");
		goto err_alloc;
	}

	if (netdev->phydev) {
		/* If we have a PHY, start polling */
		phy_start(netdev->phydev);
	} else if (priv->use_ncsi) {
		/* If using NC-SI, set our carrier on and start the stack */
		netif_carrier_on(netdev);

		/* Start the NCSI device */
		err = ncsi_start_dev(priv->ndev);
		if (err)
			goto err_ncsi;
	}

	return 0;

 err_ncsi:
	napi_disable(&priv->napi);
	netif_stop_queue(netdev);
 err_alloc:
	ftgmac100_free_buffers(priv);
	free_irq(netdev->irq, netdev);
 err_irq:
	netif_napi_del(&priv->napi);
 err_hw:
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	ftgmac100_free_rings(priv);
	return err;
}
/* ndo_stop: quiesce the stack, the PHY or NC-SI device and the MAC,
 * then release the IRQ, the buffers and the rings.
 */
static int ftgmac100_stop(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Note about the reset task: We are called with the rtnl lock
	 * held, so we are synchronized against the core of the reset
	 * task. We must not try to synchronously cancel it otherwise
	 * we can deadlock. But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't have
	 * anything special to do.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	else if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}
/* Optional ndo_do_ioctl: forward MII ioctls (SIOCGMIIPHY & co.) to the
 * attached PHY; fails with -ENXIO when running without one (NC-SI).
 */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	if (!netdev->phydev)
		return -ENXIO;

	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
/* ndo_tx_timeout: the stack detected a stuck TX queue — mask all
 * interrupts and let the reset task rebuild the chip from process
 * context.
 */
static void ftgmac100_tx_timeout(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}
  1286. static int ftgmac100_set_features(struct net_device *netdev,
  1287. netdev_features_t features)
  1288. {
  1289. struct ftgmac100 *priv = netdev_priv(netdev);
  1290. netdev_features_t changed = netdev->features ^ features;
  1291. if (!netif_running(netdev))
  1292. return 0;
  1293. /* Update the vlan filtering bit */
  1294. if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
  1295. u32 maccr;
  1296. maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
  1297. if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
  1298. maccr |= FTGMAC100_MACCR_RM_VLAN;
  1299. else
  1300. maccr &= ~FTGMAC100_MACCR_RM_VLAN;
  1301. iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
  1302. }
  1303. return 0;
  1304. }
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler with local IRQs disabled */
static void ftgmac100_poll_controller(struct net_device *netdev)
{
	unsigned long flags;

	local_irq_save(flags);
	ftgmac100_interrupt(netdev->irq, netdev);
	local_irq_restore(flags);
}
#endif
/* Netdev callbacks; VLAN filter add/remove are handled by the NC-SI
 * stack helpers.
 */
static const struct net_device_ops ftgmac100_netdev_ops = {
	.ndo_open		= ftgmac100_open,
	.ndo_stop		= ftgmac100_stop,
	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ftgmac100_do_ioctl,
	.ndo_tx_timeout		= ftgmac100_tx_timeout,
	.ndo_set_rx_mode	= ftgmac100_set_rx_mode,
	.ndo_set_features	= ftgmac100_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ftgmac100_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
};
  1330. static int ftgmac100_setup_mdio(struct net_device *netdev)
  1331. {
  1332. struct ftgmac100 *priv = netdev_priv(netdev);
  1333. struct platform_device *pdev = to_platform_device(priv->dev);
  1334. int phy_intf = PHY_INTERFACE_MODE_RGMII;
  1335. struct device_node *np = pdev->dev.of_node;
  1336. int i, err = 0;
  1337. u32 reg;
  1338. /* initialize mdio bus */
  1339. priv->mii_bus = mdiobus_alloc();
  1340. if (!priv->mii_bus)
  1341. return -EIO;
  1342. if (priv->is_aspeed) {
  1343. /* This driver supports the old MDIO interface */
  1344. reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
  1345. reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
  1346. iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
  1347. };
  1348. /* Get PHY mode from device-tree */
  1349. if (np) {
  1350. /* Default to RGMII. It's a gigabit part after all */
  1351. phy_intf = of_get_phy_mode(np);
  1352. if (phy_intf < 0)
  1353. phy_intf = PHY_INTERFACE_MODE_RGMII;
  1354. /* Aspeed only supports these. I don't know about other IP
  1355. * block vendors so I'm going to just let them through for
  1356. * now. Note that this is only a warning if for some obscure
  1357. * reason the DT really means to lie about it or it's a newer
  1358. * part we don't know about.
  1359. *
  1360. * On the Aspeed SoC there are additionally straps and SCU
  1361. * control bits that could tell us what the interface is
  1362. * (or allow us to configure it while the IP block is held
  1363. * in reset). For now I chose to keep this driver away from
  1364. * those SoC specific bits and assume the device-tree is
  1365. * right and the SCU has been configured properly by pinmux
  1366. * or the firmware.
  1367. */
  1368. if (priv->is_aspeed &&
  1369. phy_intf != PHY_INTERFACE_MODE_RMII &&
  1370. phy_intf != PHY_INTERFACE_MODE_RGMII &&
  1371. phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
  1372. phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
  1373. phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
  1374. netdev_warn(netdev,
  1375. "Unsupported PHY mode %s !\n",
  1376. phy_modes(phy_intf));
  1377. }
  1378. }
  1379. priv->mii_bus->name = "ftgmac100_mdio";
  1380. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
  1381. pdev->name, pdev->id);
  1382. priv->mii_bus->parent = priv->dev;
  1383. priv->mii_bus->priv = priv->netdev;
  1384. priv->mii_bus->read = ftgmac100_mdiobus_read;
  1385. priv->mii_bus->write = ftgmac100_mdiobus_write;
  1386. for (i = 0; i < PHY_MAX_ADDR; i++)
  1387. priv->mii_bus->irq[i] = PHY_POLL;
  1388. err = mdiobus_register(priv->mii_bus);
  1389. if (err) {
  1390. dev_err(priv->dev, "Cannot register MDIO bus!\n");
  1391. goto err_register_mdiobus;
  1392. }
  1393. err = ftgmac100_mii_probe(priv, phy_intf);
  1394. if (err) {
  1395. dev_err(priv->dev, "MII Probe failed!\n");
  1396. goto err_mii_probe;
  1397. }
  1398. return 0;
  1399. err_mii_probe:
  1400. mdiobus_unregister(priv->mii_bus);
  1401. err_register_mdiobus:
  1402. mdiobus_free(priv->mii_bus);
  1403. return err;
  1404. }
/* Undo ftgmac100_setup_mdio(): disconnect the PHY and unregister/free
 * the MDIO bus. A no-op when no PHY was attached (e.g. NC-SI mode).
 */
static void ftgmac100_destroy_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (!netdev->phydev)
		return;

	phy_disconnect(netdev->phydev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
}
/* NC-SI state-change callback: just log link transitions once the
 * device has reached its functional state.
 */
static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
	if (unlikely(nd->state != ncsi_dev_state_functional))
		return;

	netdev_info(nd->dev, "NCSI interface %s\n",
		    nd->link_up ? "up" : "down");
}
  1421. static int ftgmac100_probe(struct platform_device *pdev)
  1422. {
  1423. struct resource *res;
  1424. int irq;
  1425. struct net_device *netdev;
  1426. struct ftgmac100 *priv;
  1427. struct device_node *np;
  1428. int err = 0;
  1429. if (!pdev)
  1430. return -ENODEV;
  1431. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1432. if (!res)
  1433. return -ENXIO;
  1434. irq = platform_get_irq(pdev, 0);
  1435. if (irq < 0)
  1436. return irq;
  1437. /* setup net_device */
  1438. netdev = alloc_etherdev(sizeof(*priv));
  1439. if (!netdev) {
  1440. err = -ENOMEM;
  1441. goto err_alloc_etherdev;
  1442. }
  1443. SET_NETDEV_DEV(netdev, &pdev->dev);
  1444. netdev->ethtool_ops = &ftgmac100_ethtool_ops;
  1445. netdev->netdev_ops = &ftgmac100_netdev_ops;
  1446. netdev->watchdog_timeo = 5 * HZ;
  1447. platform_set_drvdata(pdev, netdev);
  1448. /* setup private data */
  1449. priv = netdev_priv(netdev);
  1450. priv->netdev = netdev;
  1451. priv->dev = &pdev->dev;
  1452. INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
  1453. /* map io memory */
  1454. priv->res = request_mem_region(res->start, resource_size(res),
  1455. dev_name(&pdev->dev));
  1456. if (!priv->res) {
  1457. dev_err(&pdev->dev, "Could not reserve memory region\n");
  1458. err = -ENOMEM;
  1459. goto err_req_mem;
  1460. }
  1461. priv->base = ioremap(res->start, resource_size(res));
  1462. if (!priv->base) {
  1463. dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
  1464. err = -EIO;
  1465. goto err_ioremap;
  1466. }
  1467. netdev->irq = irq;
  1468. /* Enable pause */
  1469. priv->tx_pause = true;
  1470. priv->rx_pause = true;
  1471. priv->aneg_pause = true;
  1472. /* MAC address from chip or random one */
  1473. ftgmac100_initial_mac(priv);
  1474. np = pdev->dev.of_node;
  1475. if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
  1476. of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
  1477. priv->rxdes0_edorr_mask = BIT(30);
  1478. priv->txdes0_edotr_mask = BIT(30);
  1479. priv->is_aspeed = true;
  1480. } else {
  1481. priv->rxdes0_edorr_mask = BIT(15);
  1482. priv->txdes0_edotr_mask = BIT(15);
  1483. }
  1484. if (np && of_get_property(np, "use-ncsi", NULL)) {
  1485. if (!IS_ENABLED(CONFIG_NET_NCSI)) {
  1486. dev_err(&pdev->dev, "NCSI stack not enabled\n");
  1487. goto err_ncsi_dev;
  1488. }
  1489. dev_info(&pdev->dev, "Using NCSI interface\n");
  1490. priv->use_ncsi = true;
  1491. priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
  1492. if (!priv->ndev)
  1493. goto err_ncsi_dev;
  1494. } else {
  1495. priv->use_ncsi = false;
  1496. err = ftgmac100_setup_mdio(netdev);
  1497. if (err)
  1498. goto err_setup_mdio;
  1499. }
  1500. /* Default ring sizes */
  1501. priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
  1502. priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
  1503. /* Base feature set */
  1504. netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
  1505. NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
  1506. NETIF_F_HW_VLAN_CTAG_TX;
  1507. if (priv->use_ncsi)
  1508. netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  1509. /* AST2400 doesn't have working HW checksum generation */
  1510. if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
  1511. netdev->hw_features &= ~NETIF_F_HW_CSUM;
  1512. if (np && of_get_property(np, "no-hw-checksum", NULL))
  1513. netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
  1514. netdev->features |= netdev->hw_features;
  1515. /* register network device */
  1516. err = register_netdev(netdev);
  1517. if (err) {
  1518. dev_err(&pdev->dev, "Failed to register netdev\n");
  1519. goto err_register_netdev;
  1520. }
  1521. netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
  1522. return 0;
  1523. err_ncsi_dev:
  1524. err_register_netdev:
  1525. ftgmac100_destroy_mdio(netdev);
  1526. err_setup_mdio:
  1527. iounmap(priv->base);
  1528. err_ioremap:
  1529. release_resource(priv->res);
  1530. err_req_mem:
  1531. free_netdev(netdev);
  1532. err_alloc_etherdev:
  1533. return err;
  1534. }
/* Platform remove: unregister the netdev, make sure the reset task is
 * gone, then release MDIO, MMIO and the net_device itself.
 */
static int ftgmac100_remove(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct ftgmac100 *priv;

	netdev = platform_get_drvdata(pdev);
	priv = netdev_priv(netdev);

	unregister_netdev(netdev);

	/* There's a small chance the reset task will have been re-queued,
	 * during stop, make sure it's gone before we free the structure.
	 */
	cancel_work_sync(&priv->reset_task);

	ftgmac100_destroy_mdio(netdev);

	iounmap(priv->base);
	release_resource(priv->res);

	netif_napi_del(&priv->napi);
	free_netdev(netdev);
	return 0;
}
/* Device-tree match table (Aspeed variants are detected separately by
 * compatible string in probe)
 */
static const struct of_device_id ftgmac100_of_match[] = {
	{ .compatible = "faraday,ftgmac100" },
	{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
/* Platform driver glue and module metadata */
static struct platform_driver ftgmac100_driver = {
	.probe	= ftgmac100_probe,
	.remove	= ftgmac100_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= ftgmac100_of_match,
	},
};
module_platform_driver(ftgmac100_driver);

MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");