ftgmac100.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914
  1. /*
  2. * Faraday FTGMAC100 Gigabit Ethernet
  3. *
  4. * (C) Copyright 2009-2011 Faraday Technology
  5. * Po-Yu Chuang <ratbert@faraday-tech.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  20. */
  21. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22. #include <linux/dma-mapping.h>
  23. #include <linux/etherdevice.h>
  24. #include <linux/ethtool.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/netdevice.h>
  29. #include <linux/of.h>
  30. #include <linux/phy.h>
  31. #include <linux/platform_device.h>
  32. #include <linux/property.h>
  33. #include <linux/crc32.h>
  34. #include <linux/if_vlan.h>
  35. #include <linux/of_net.h>
  36. #include <net/ip.h>
  37. #include <net/ncsi.h>
  38. #include "ftgmac100.h"
  39. #define DRV_NAME "ftgmac100"
  40. #define DRV_VERSION "0.7"
  41. /* Arbitrary values, I am not sure the HW has limits */
  42. #define MAX_RX_QUEUE_ENTRIES 1024
  43. #define MAX_TX_QUEUE_ENTRIES 1024
  44. #define MIN_RX_QUEUE_ENTRIES 32
  45. #define MIN_TX_QUEUE_ENTRIES 32
  46. /* Defaults */
  47. #define DEF_RX_QUEUE_ENTRIES 128
  48. #define DEF_TX_QUEUE_ENTRIES 128
  49. #define MAX_PKT_SIZE 1536
  50. #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */
  51. /* Min number of tx ring entries before stopping queue */
  52. #define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
/* Per-adapter driver state, stored as the netdev private data */
struct ftgmac100 {
	/* Registers */
	struct resource *res;
	void __iomem *base;		/* ioremapped register window */

	/* Rx ring */
	unsigned int rx_q_entries;	/* ring size; masked arithmetic implies a power of 2 */
	struct ftgmac100_rxdes *rxdes;	/* RX descriptor ring */
	dma_addr_t rxdes_dma;		/* bus address of the RX ring */
	struct sk_buff **rx_skbs;	/* skb backing each RX descriptor */
	unsigned int rx_pointer;	/* next RX descriptor to process */
	u32 rxdes0_edorr_mask;		/* end-of-ring bit (location differs per chip variant) */

	/* Tx ring */
	unsigned int tx_q_entries;	/* ring size; masked arithmetic implies a power of 2 */
	struct ftgmac100_txdes *txdes;	/* TX descriptor ring */
	dma_addr_t txdes_dma;		/* bus address of the TX ring */
	struct sk_buff **tx_skbs;	/* skb backing each TX descriptor */
	unsigned int tx_clean_pointer;	/* next TX descriptor to reclaim */
	unsigned int tx_pointer;	/* next free TX descriptor */
	u32 txdes0_edotr_mask;		/* end-of-ring bit (location differs per chip variant) */

	/* Used to signal the reset task of ring change request */
	unsigned int new_rx_q_entries;
	unsigned int new_tx_q_entries;

	/* Scratch page to use when rx skb alloc fails */
	void *rx_scratch;
	dma_addr_t rx_scratch_dma;

	/* Component structures */
	struct net_device *netdev;
	struct device *dev;
	struct ncsi_dev *ndev;
	struct napi_struct napi;
	struct work_struct reset_task;
	struct mii_bus *mii_bus;

	/* Link management */
	int cur_speed;			/* SPEED_10/100/1000, or 0 when there is no link */
	int cur_duplex;			/* DUPLEX_* value */
	bool use_ncsi;

	/* Multicast filter settings (cached copies of MAHT0/MAHT1) */
	u32 maht0;
	u32 maht1;

	/* Flow control settings */
	bool tx_pause;
	bool rx_pause;
	bool aneg_pause;

	/* Misc */
	bool need_mac_restart;
	bool is_aspeed;
};
  100. static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
  101. {
  102. struct net_device *netdev = priv->netdev;
  103. int i;
  104. /* NOTE: reset clears all registers */
  105. iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
  106. iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
  107. priv->base + FTGMAC100_OFFSET_MACCR);
  108. for (i = 0; i < 50; i++) {
  109. unsigned int maccr;
  110. maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
  111. if (!(maccr & FTGMAC100_MACCR_SW_RST))
  112. return 0;
  113. udelay(1);
  114. }
  115. netdev_err(netdev, "Hardware reset failed\n");
  116. return -EIO;
  117. }
  118. static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
  119. {
  120. u32 maccr = 0;
  121. switch (priv->cur_speed) {
  122. case SPEED_10:
  123. case 0: /* no link */
  124. break;
  125. case SPEED_100:
  126. maccr |= FTGMAC100_MACCR_FAST_MODE;
  127. break;
  128. case SPEED_1000:
  129. maccr |= FTGMAC100_MACCR_GIGA_MODE;
  130. break;
  131. default:
  132. netdev_err(priv->netdev, "Unknown speed %d !\n",
  133. priv->cur_speed);
  134. break;
  135. }
  136. /* (Re)initialize the queue pointers */
  137. priv->rx_pointer = 0;
  138. priv->tx_clean_pointer = 0;
  139. priv->tx_pointer = 0;
  140. /* The doc says reset twice with 10us interval */
  141. if (ftgmac100_reset_mac(priv, maccr))
  142. return -EIO;
  143. usleep_range(10, 1000);
  144. return ftgmac100_reset_mac(priv, maccr);
  145. }
  146. static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
  147. {
  148. unsigned int maddr = mac[0] << 8 | mac[1];
  149. unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
  150. iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
  151. iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
  152. }
  153. static void ftgmac100_initial_mac(struct ftgmac100 *priv)
  154. {
  155. u8 mac[ETH_ALEN];
  156. unsigned int m;
  157. unsigned int l;
  158. void *addr;
  159. addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
  160. if (addr) {
  161. ether_addr_copy(priv->netdev->dev_addr, mac);
  162. dev_info(priv->dev, "Read MAC address %pM from device tree\n",
  163. mac);
  164. return;
  165. }
  166. m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
  167. l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
  168. mac[0] = (m >> 8) & 0xff;
  169. mac[1] = m & 0xff;
  170. mac[2] = (l >> 24) & 0xff;
  171. mac[3] = (l >> 16) & 0xff;
  172. mac[4] = (l >> 8) & 0xff;
  173. mac[5] = l & 0xff;
  174. if (is_valid_ether_addr(mac)) {
  175. ether_addr_copy(priv->netdev->dev_addr, mac);
  176. dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
  177. } else {
  178. eth_hw_addr_random(priv->netdev);
  179. dev_info(priv->dev, "Generated random MAC address %pM\n",
  180. priv->netdev->dev_addr);
  181. }
  182. }
  183. static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
  184. {
  185. int ret;
  186. ret = eth_prepare_mac_addr_change(dev, p);
  187. if (ret < 0)
  188. return ret;
  189. eth_commit_mac_addr_change(dev, p);
  190. ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
  191. return 0;
  192. }
  193. static void ftgmac100_config_pause(struct ftgmac100 *priv)
  194. {
  195. u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
  196. /* Throttle tx queue when receiving pause frames */
  197. if (priv->rx_pause)
  198. fcr |= FTGMAC100_FCR_FC_EN;
  199. /* Enables sending pause frames when the RX queue is past a
  200. * certain threshold.
  201. */
  202. if (priv->tx_pause)
  203. fcr |= FTGMAC100_FCR_FCTHR_EN;
  204. iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
  205. }
/* One-time HW setup after a reset: ack stale interrupts, program the
 * ring base addresses, RX buffer size, descriptor autopoll, MAC
 * address, multicast hash, DMA burst/FIFO thresholds, interrupt
 * mitigation and FIFO sizes. The MAC/DMA engines themselves are
 * enabled separately by ftgmac100_start_hw().
 */
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
	u32 reg, rfifo_sz, tfifo_sz;

	/* Clear stale interrupts by writing the pending bits back */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);

	/* Setup RX ring buffer base */
	iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);

	/* Setup TX ring buffer base */
	iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);

	/* Configure RX buffer size */
	iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
		  priv->base + FTGMAC100_OFFSET_RBSR);

	/* Set RX descriptor autopoll */
	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
		  priv->base + FTGMAC100_OFFSET_APTC);

	/* Write MAC address */
	ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);

	/* Write multicast filter */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Configure descriptor sizes and increase burst sizes according
	 * to values in Aspeed SDK. The FIFO arbitration is enabled and
	 * the thresholds set based on the recommended values in the
	 * AST2400 specification.
	 */
	iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
		  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
		  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
		  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
		  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
		  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
		  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
		  priv->base + FTGMAC100_OFFSET_DBLAC);

	/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
	 * mitigation doesn't seem to provide any benefit with NAPI so leave
	 * it at that.
	 */
	iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
		  FTGMAC100_ITC_TXINT_THR(1),
		  priv->base + FTGMAC100_OFFSET_ITC);

	/* Configure FIFO sizes in the TPAFCR register: copy the 3-bit
	 * RX/TX FIFO sizes reported by the FEAR feature register into
	 * TPAFCR bits 24-26 (RX) and 27-29 (TX).
	 */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
	rfifo_sz = reg & 0x00000007;
	tfifo_sz = (reg >> 3) & 0x00000007;
	reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
	reg &= ~0x3f000000;
	reg |= (tfifo_sz << 27);
	reg |= (rfifo_sz << 24);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
}
/* Build and write MACCR: preserve the speed-mode bits programmed at
 * reset time, enable the DMA and MAC engines, and apply the options
 * that depend on netdev flags/features. Also called on a running MAC
 * to reconfigure it (see ftgmac100_set_rx_mode()).
 */
static void ftgmac100_start_hw(struct ftgmac100 *priv)
{
	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);

	/* Keep the original GMAC and FAST bits */
	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);

	/* Add all the main enable bits */
	maccr |= FTGMAC100_MACCR_TXDMA_EN |
		 FTGMAC100_MACCR_RXDMA_EN |
		 FTGMAC100_MACCR_TXMAC_EN |
		 FTGMAC100_MACCR_RXMAC_EN |
		 FTGMAC100_MACCR_CRC_APD |
		 FTGMAC100_MACCR_PHY_LINK_LEVEL |
		 FTGMAC100_MACCR_RX_RUNT |
		 FTGMAC100_MACCR_RX_BROADPKT;

	/* Add other bits as needed */
	if (priv->cur_duplex == DUPLEX_FULL)
		maccr |= FTGMAC100_MACCR_FULLDUP;
	if (priv->netdev->flags & IFF_PROMISC)
		maccr |= FTGMAC100_MACCR_RX_ALL;
	if (priv->netdev->flags & IFF_ALLMULTI)
		maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
	else if (netdev_mc_count(priv->netdev))
		maccr |= FTGMAC100_MACCR_HT_MULTI_EN;

	/* Vlan filtering enabled */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		maccr |= FTGMAC100_MACCR_RM_VLAN;

	/* Hit the HW */
	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
/* Stop the MAC entirely: clearing MACCR disables all the TX/RX DMA
 * and MAC engine enable bits set by ftgmac100_start_hw().
 */
static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
  290. static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
  291. {
  292. struct netdev_hw_addr *ha;
  293. priv->maht1 = 0;
  294. priv->maht0 = 0;
  295. netdev_for_each_mc_addr(ha, priv->netdev) {
  296. u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
  297. crc_val = (~(crc_val >> 2)) & 0x3f;
  298. if (crc_val >= 32)
  299. priv->maht1 |= 1ul << (crc_val - 32);
  300. else
  301. priv->maht0 |= 1ul << (crc_val);
  302. }
  303. }
  304. static void ftgmac100_set_rx_mode(struct net_device *netdev)
  305. {
  306. struct ftgmac100 *priv = netdev_priv(netdev);
  307. /* Setup the hash filter */
  308. ftgmac100_calc_mc_hash(priv);
  309. /* Interface down ? that's all there is to do */
  310. if (!netif_running(netdev))
  311. return;
  312. /* Update the HW */
  313. iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
  314. iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
  315. /* Reconfigure MACCR */
  316. ftgmac100_start_hw(priv);
  317. }
  318. static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
  319. struct ftgmac100_rxdes *rxdes, gfp_t gfp)
  320. {
  321. struct net_device *netdev = priv->netdev;
  322. struct sk_buff *skb;
  323. dma_addr_t map;
  324. int err;
  325. skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
  326. if (unlikely(!skb)) {
  327. if (net_ratelimit())
  328. netdev_warn(netdev, "failed to allocate rx skb\n");
  329. err = -ENOMEM;
  330. map = priv->rx_scratch_dma;
  331. } else {
  332. map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
  333. DMA_FROM_DEVICE);
  334. if (unlikely(dma_mapping_error(priv->dev, map))) {
  335. if (net_ratelimit())
  336. netdev_err(netdev, "failed to map rx page\n");
  337. dev_kfree_skb_any(skb);
  338. map = priv->rx_scratch_dma;
  339. skb = NULL;
  340. err = -ENOMEM;
  341. }
  342. }
  343. /* Store skb */
  344. priv->rx_skbs[entry] = skb;
  345. /* Store DMA address into RX desc */
  346. rxdes->rxdes3 = cpu_to_le32(map);
  347. /* Ensure the above is ordered vs clearing the OWN bit */
  348. dma_wmb();
  349. /* Clean status (which resets own bit) */
  350. if (entry == (priv->rx_q_entries - 1))
  351. rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
  352. else
  353. rxdes->rxdes0 = 0;
  354. return 0;
  355. }
  356. static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
  357. unsigned int pointer)
  358. {
  359. return (pointer + 1) & (priv->rx_q_entries - 1);
  360. }
  361. static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
  362. {
  363. struct net_device *netdev = priv->netdev;
  364. if (status & FTGMAC100_RXDES0_RX_ERR)
  365. netdev->stats.rx_errors++;
  366. if (status & FTGMAC100_RXDES0_CRC_ERR)
  367. netdev->stats.rx_crc_errors++;
  368. if (status & (FTGMAC100_RXDES0_FTL |
  369. FTGMAC100_RXDES0_RUNT |
  370. FTGMAC100_RXDES0_RX_ODD_NB))
  371. netdev->stats.rx_length_errors++;
  372. }
  373. static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
  374. {
  375. struct net_device *netdev = priv->netdev;
  376. struct ftgmac100_rxdes *rxdes;
  377. struct sk_buff *skb;
  378. unsigned int pointer, size;
  379. u32 status, csum_vlan;
  380. dma_addr_t map;
  381. /* Grab next RX descriptor */
  382. pointer = priv->rx_pointer;
  383. rxdes = &priv->rxdes[pointer];
  384. /* Grab descriptor status */
  385. status = le32_to_cpu(rxdes->rxdes0);
  386. /* Do we have a packet ? */
  387. if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
  388. return false;
  389. /* Order subsequent reads with the test for the ready bit */
  390. dma_rmb();
  391. /* We don't cope with fragmented RX packets */
  392. if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
  393. !(status & FTGMAC100_RXDES0_LRS)))
  394. goto drop;
  395. /* Grab received size and csum vlan field in the descriptor */
  396. size = status & FTGMAC100_RXDES0_VDBC;
  397. csum_vlan = le32_to_cpu(rxdes->rxdes1);
  398. /* Any error (other than csum offload) flagged ? */
  399. if (unlikely(status & RXDES0_ANY_ERROR)) {
  400. /* Correct for incorrect flagging of runt packets
  401. * with vlan tags... Just accept a runt packet that
  402. * has been flagged as vlan and whose size is at
  403. * least 60 bytes.
  404. */
  405. if ((status & FTGMAC100_RXDES0_RUNT) &&
  406. (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
  407. (size >= 60))
  408. status &= ~FTGMAC100_RXDES0_RUNT;
  409. /* Any error still in there ? */
  410. if (status & RXDES0_ANY_ERROR) {
  411. ftgmac100_rx_packet_error(priv, status);
  412. goto drop;
  413. }
  414. }
  415. /* If the packet had no skb (failed to allocate earlier)
  416. * then try to allocate one and skip
  417. */
  418. skb = priv->rx_skbs[pointer];
  419. if (!unlikely(skb)) {
  420. ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
  421. goto drop;
  422. }
  423. if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
  424. netdev->stats.multicast++;
  425. /* If the HW found checksum errors, bounce it to software.
  426. *
  427. * If we didn't, we need to see if the packet was recognized
  428. * by HW as one of the supported checksummed protocols before
  429. * we accept the HW test results.
  430. */
  431. if (netdev->features & NETIF_F_RXCSUM) {
  432. u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
  433. FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
  434. FTGMAC100_RXDES1_IP_CHKSUM_ERR;
  435. if ((csum_vlan & err_bits) ||
  436. !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
  437. skb->ip_summed = CHECKSUM_NONE;
  438. else
  439. skb->ip_summed = CHECKSUM_UNNECESSARY;
  440. }
  441. /* Transfer received size to skb */
  442. skb_put(skb, size);
  443. /* Extract vlan tag */
  444. if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  445. (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
  446. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  447. csum_vlan & 0xffff);
  448. /* Tear down DMA mapping, do necessary cache management */
  449. map = le32_to_cpu(rxdes->rxdes3);
  450. #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
  451. /* When we don't have an iommu, we can save cycles by not
  452. * invalidating the cache for the part of the packet that
  453. * wasn't received.
  454. */
  455. dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
  456. #else
  457. dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
  458. #endif
  459. /* Resplenish rx ring */
  460. ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
  461. priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
  462. skb->protocol = eth_type_trans(skb, netdev);
  463. netdev->stats.rx_packets++;
  464. netdev->stats.rx_bytes += size;
  465. /* push packet to protocol stack */
  466. if (skb->ip_summed == CHECKSUM_NONE)
  467. netif_receive_skb(skb);
  468. else
  469. napi_gro_receive(&priv->napi, skb);
  470. (*processed)++;
  471. return true;
  472. drop:
  473. /* Clean rxdes0 (which resets own bit) */
  474. rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
  475. priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
  476. netdev->stats.rx_dropped++;
  477. return true;
  478. }
  479. static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
  480. unsigned int index)
  481. {
  482. if (index == (priv->tx_q_entries - 1))
  483. return priv->txdes0_edotr_mask;
  484. else
  485. return 0;
  486. }
  487. static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
  488. unsigned int pointer)
  489. {
  490. return (pointer + 1) & (priv->tx_q_entries - 1);
  491. }
static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
	/* Returns the number of available slots in the TX queue
	 *
	 * This always leaves one free slot so we don't have to
	 * worry about empty vs. full, and this simplifies the
	 * test for ftgmac100_tx_buf_cleanable() below
	 *
	 * The unsigned subtraction deliberately wraps; the mask
	 * reduces it modulo the (power of two) ring size.
	 */
	return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
		(priv->tx_q_entries - 1);
}
  503. static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
  504. {
  505. return priv->tx_pointer != priv->tx_clean_pointer;
  506. }
/* Undo the DMA mapping of one TX descriptor and, on the last segment
 * of a packet, free the skb itself. Clears the tx_skbs[] slot.
 *
 * @ctl_stat: the descriptor's txdes0 word, used to tell the head
 *            segment (FTS, mapped from the skb head) from a fragment
 *            (mapped as a page) and to recover the fragment length.
 */
static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
				     unsigned int pointer,
				     struct sk_buff *skb,
				     struct ftgmac100_txdes *txdes,
				     u32 ctl_stat)
{
	dma_addr_t map = le32_to_cpu(txdes->txdes3);
	size_t len;

	if (ctl_stat & FTGMAC100_TXDES0_FTS) {
		/* Head segment: was mapped with dma_map_single() */
		len = skb_headlen(skb);
		dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
	} else {
		/* Fragment: was mapped with skb_frag_dma_map() */
		len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
		dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
	}

	/* Free SKB on last segment */
	if (ctl_stat & FTGMAC100_TXDES0_LTS)
		dev_kfree_skb(skb);
	priv->tx_skbs[pointer] = NULL;
}
/* Try to reclaim the TX descriptor at tx_clean_pointer.
 *
 * Returns false if the HW still owns it (nothing done). Otherwise
 * accounts the packet, unmaps/frees it, hands the descriptor back
 * (keeping only the end-of-ring bit), advances tx_clean_pointer and
 * returns true.
 */
static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_txdes *txdes;
	struct sk_buff *skb;
	unsigned int pointer;
	u32 ctl_stat;

	pointer = priv->tx_clean_pointer;
	txdes = &priv->txdes[pointer];

	ctl_stat = le32_to_cpu(txdes->txdes0);
	/* Still owned by the DMA engine: not transmitted yet */
	if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
		return false;

	skb = priv->tx_skbs[pointer];
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;
	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
	/* Clear the descriptor, preserving only the end-of-ring bit */
	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);

	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);

	return true;
}
/* Reclaim all completed TX descriptors, then wake the TX queue if it
 * was stopped for lack of ring space and enough room has been freed.
 */
static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	/* Process all completed packets */
	while (ftgmac100_tx_buf_cleanable(priv) &&
	       ftgmac100_tx_complete_packet(priv))
		;

	/* Restart queue if needed.
	 *
	 * The barrier orders the pointer updates above against the
	 * queue-stopped test below; it pairs with the smp_mb() in the
	 * xmit path so stop/wake don't race.
	 */
	smp_mb();
	if (unlikely(netif_queue_stopped(netdev) &&
		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(netdev, 0);
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the TX lock before actually waking */
		if (netif_queue_stopped(netdev) &&
		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
		__netif_tx_unlock(txq);
	}
}
  567. static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
  568. {
  569. if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
  570. u8 ip_proto = ip_hdr(skb)->protocol;
  571. *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
  572. switch(ip_proto) {
  573. case IPPROTO_TCP:
  574. *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
  575. return true;
  576. case IPPROTO_UDP:
  577. *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
  578. return true;
  579. case IPPROTO_IP:
  580. return true;
  581. }
  582. }
  583. return skb_checksum_help(skb) == 0;
  584. }
  585. static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
  586. struct net_device *netdev)
  587. {
  588. struct ftgmac100 *priv = netdev_priv(netdev);
  589. struct ftgmac100_txdes *txdes, *first;
  590. unsigned int pointer, nfrags, len, i, j;
  591. u32 f_ctl_stat, ctl_stat, csum_vlan;
  592. dma_addr_t map;
  593. /* The HW doesn't pad small frames */
  594. if (eth_skb_pad(skb)) {
  595. netdev->stats.tx_dropped++;
  596. return NETDEV_TX_OK;
  597. }
  598. /* Reject oversize packets */
  599. if (unlikely(skb->len > MAX_PKT_SIZE)) {
  600. if (net_ratelimit())
  601. netdev_dbg(netdev, "tx packet too big\n");
  602. goto drop;
  603. }
  604. /* Do we have a limit on #fragments ? I yet have to get a reply
  605. * from Aspeed. If there's one I haven't hit it.
  606. */
  607. nfrags = skb_shinfo(skb)->nr_frags;
  608. /* Get header len */
  609. len = skb_headlen(skb);
  610. /* Map the packet head */
  611. map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
  612. if (dma_mapping_error(priv->dev, map)) {
  613. if (net_ratelimit())
  614. netdev_err(netdev, "map tx packet head failed\n");
  615. goto drop;
  616. }
  617. /* Grab the next free tx descriptor */
  618. pointer = priv->tx_pointer;
  619. txdes = first = &priv->txdes[pointer];
  620. /* Setup it up with the packet head. Don't write the head to the
  621. * ring just yet
  622. */
  623. priv->tx_skbs[pointer] = skb;
  624. f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
  625. f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
  626. f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
  627. f_ctl_stat |= FTGMAC100_TXDES0_FTS;
  628. if (nfrags == 0)
  629. f_ctl_stat |= FTGMAC100_TXDES0_LTS;
  630. txdes->txdes3 = cpu_to_le32(map);
  631. /* Setup HW checksumming */
  632. csum_vlan = 0;
  633. if (skb->ip_summed == CHECKSUM_PARTIAL &&
  634. !ftgmac100_prep_tx_csum(skb, &csum_vlan))
  635. goto drop;
  636. /* Add VLAN tag */
  637. if (skb_vlan_tag_present(skb)) {
  638. csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
  639. csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
  640. }
  641. txdes->txdes1 = cpu_to_le32(csum_vlan);
  642. /* Next descriptor */
  643. pointer = ftgmac100_next_tx_pointer(priv, pointer);
  644. /* Add the fragments */
  645. for (i = 0; i < nfrags; i++) {
  646. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  647. len = frag->size;
  648. /* Map it */
  649. map = skb_frag_dma_map(priv->dev, frag, 0, len,
  650. DMA_TO_DEVICE);
  651. if (dma_mapping_error(priv->dev, map))
  652. goto dma_err;
  653. /* Setup descriptor */
  654. priv->tx_skbs[pointer] = skb;
  655. txdes = &priv->txdes[pointer];
  656. ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
  657. ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
  658. ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
  659. if (i == (nfrags - 1))
  660. ctl_stat |= FTGMAC100_TXDES0_LTS;
  661. txdes->txdes0 = cpu_to_le32(ctl_stat);
  662. txdes->txdes1 = 0;
  663. txdes->txdes3 = cpu_to_le32(map);
  664. /* Next one */
  665. pointer = ftgmac100_next_tx_pointer(priv, pointer);
  666. }
  667. /* Order the previous packet and descriptor udpates
  668. * before setting the OWN bit on the first descriptor.
  669. */
  670. dma_wmb();
  671. first->txdes0 = cpu_to_le32(f_ctl_stat);
  672. /* Update next TX pointer */
  673. priv->tx_pointer = pointer;
  674. /* If there isn't enough room for all the fragments of a new packet
  675. * in the TX ring, stop the queue. The sequence below is race free
  676. * vs. a concurrent restart in ftgmac100_poll()
  677. */
  678. if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
  679. netif_stop_queue(netdev);
  680. /* Order the queue stop with the test below */
  681. smp_mb();
  682. if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
  683. netif_wake_queue(netdev);
  684. }
  685. /* Poke transmitter to read the updated TX descriptors */
  686. iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
  687. return NETDEV_TX_OK;
  688. dma_err:
  689. if (net_ratelimit())
  690. netdev_err(netdev, "map tx fragment failed\n");
  691. /* Free head */
  692. pointer = priv->tx_pointer;
  693. ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
  694. first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
  695. /* Then all fragments */
  696. for (j = 0; j < i; j++) {
  697. pointer = ftgmac100_next_tx_pointer(priv, pointer);
  698. txdes = &priv->txdes[pointer];
  699. ctl_stat = le32_to_cpu(txdes->txdes0);
  700. ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
  701. txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
  702. }
  703. /* This cannot be reached if we successfully mapped the
  704. * last fragment, so we know ftgmac100_free_tx_packet()
  705. * hasn't freed the skb yet.
  706. */
  707. drop:
  708. /* Drop the packet */
  709. dev_kfree_skb_any(skb);
  710. netdev->stats.tx_dropped++;
  711. return NETDEV_TX_OK;
  712. }
  713. static void ftgmac100_free_buffers(struct ftgmac100 *priv)
  714. {
  715. int i;
  716. /* Free all RX buffers */
  717. for (i = 0; i < priv->rx_q_entries; i++) {
  718. struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
  719. struct sk_buff *skb = priv->rx_skbs[i];
  720. dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
  721. if (!skb)
  722. continue;
  723. priv->rx_skbs[i] = NULL;
  724. dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
  725. dev_kfree_skb_any(skb);
  726. }
  727. /* Free all TX buffers */
  728. for (i = 0; i < priv->tx_q_entries; i++) {
  729. struct ftgmac100_txdes *txdes = &priv->txdes[i];
  730. struct sk_buff *skb = priv->tx_skbs[i];
  731. if (!skb)
  732. continue;
  733. ftgmac100_free_tx_packet(priv, i, skb, txdes,
  734. le32_to_cpu(txdes->txdes0));
  735. }
  736. }
  737. static void ftgmac100_free_rings(struct ftgmac100 *priv)
  738. {
  739. /* Free skb arrays */
  740. kfree(priv->rx_skbs);
  741. kfree(priv->tx_skbs);
  742. /* Free descriptors */
  743. if (priv->rxdes)
  744. dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
  745. sizeof(struct ftgmac100_rxdes),
  746. priv->rxdes, priv->rxdes_dma);
  747. priv->rxdes = NULL;
  748. if (priv->txdes)
  749. dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
  750. sizeof(struct ftgmac100_txdes),
  751. priv->txdes, priv->txdes_dma);
  752. priv->txdes = NULL;
  753. /* Free scratch packet buffer */
  754. if (priv->rx_scratch)
  755. dma_free_coherent(priv->dev, RX_BUF_SIZE,
  756. priv->rx_scratch, priv->rx_scratch_dma);
  757. }
/* Allocate the maximum-sized skb pointer arrays, both coherent
 * descriptor rings and the RX scratch buffer.
 *
 * Rings are always allocated at MAX_*_QUEUE_ENTRIES so the queue size
 * can later be changed via ethtool without re-allocating.
 *
 * Returns 0 or -ENOMEM. On failure the caller is expected to unwind
 * via ftgmac100_free_rings(), which handles partially allocated state.
 */
static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
	/* Allocate skb arrays */
	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->rx_skbs)
		return -ENOMEM;
	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->tx_skbs)
		return -ENOMEM;

	/* Allocate descriptors (zeroed: the MAC must not see stale OWN bits) */
	priv->rxdes = dma_zalloc_coherent(priv->dev,
					  MAX_RX_QUEUE_ENTRIES *
					  sizeof(struct ftgmac100_rxdes),
					  &priv->rxdes_dma, GFP_KERNEL);
	if (!priv->rxdes)
		return -ENOMEM;
	priv->txdes = dma_zalloc_coherent(priv->dev,
					  MAX_TX_QUEUE_ENTRIES *
					  sizeof(struct ftgmac100_txdes),
					  &priv->txdes_dma, GFP_KERNEL);
	if (!priv->txdes)
		return -ENOMEM;

	/* Allocate scratch packet buffer (fallback RX DMA target) */
	priv->rx_scratch = dma_alloc_coherent(priv->dev,
					      RX_BUF_SIZE,
					      &priv->rx_scratch_dma,
					      GFP_KERNEL);
	if (!priv->rx_scratch)
		return -ENOMEM;

	return 0;
}
  791. static void ftgmac100_init_rings(struct ftgmac100 *priv)
  792. {
  793. struct ftgmac100_rxdes *rxdes = NULL;
  794. struct ftgmac100_txdes *txdes = NULL;
  795. int i;
  796. /* Update entries counts */
  797. priv->rx_q_entries = priv->new_rx_q_entries;
  798. priv->tx_q_entries = priv->new_tx_q_entries;
  799. if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
  800. return;
  801. /* Initialize RX ring */
  802. for (i = 0; i < priv->rx_q_entries; i++) {
  803. rxdes = &priv->rxdes[i];
  804. rxdes->rxdes0 = 0;
  805. rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
  806. }
  807. /* Mark the end of the ring */
  808. rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
  809. if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES))
  810. return;
  811. /* Initialize TX ring */
  812. for (i = 0; i < priv->tx_q_entries; i++) {
  813. txdes = &priv->txdes[i];
  814. txdes->txdes0 = 0;
  815. }
  816. txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
  817. }
  818. static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
  819. {
  820. int i;
  821. for (i = 0; i < priv->rx_q_entries; i++) {
  822. struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
  823. if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
  824. return -ENOMEM;
  825. }
  826. return 0;
  827. }
/* phylib link-change callback.
 *
 * Snapshots the new speed/duplex/pause state; when anything the MAC
 * cares about changed, records it, masks interrupts and schedules an
 * asynchronous chip reset to apply the new configuration.
 */
static void ftgmac100_adjust_link(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	bool tx_pause, rx_pause;
	int new_speed;

	/* We store "no link" as speed 0 */
	if (!phydev->link)
		new_speed = 0;
	else
		new_speed = phydev->speed;

	/* Grab pause settings from PHY if configured to do so */
	if (priv->aneg_pause) {
		rx_pause = tx_pause = phydev->pause;
		if (phydev->asym_pause)
			tx_pause = !rx_pause;
	} else {
		rx_pause = priv->rx_pause;
		tx_pause = priv->tx_pause;
	}

	/* Link hasn't changed, do nothing.
	 * NOTE(review): this compares phydev->speed rather than new_speed,
	 * so a link-down event where phydev->speed keeps its old value
	 * could be treated as "unchanged" — confirm intended.
	 */
	if (phydev->speed == priv->cur_speed &&
	    phydev->duplex == priv->cur_duplex &&
	    rx_pause == priv->rx_pause &&
	    tx_pause == priv->tx_pause)
		return;

	/* Print status if we have a link or we had one and just lost it,
	 * don't print otherwise.
	 */
	if (new_speed || priv->cur_speed)
		phy_print_status(phydev);

	priv->cur_speed = new_speed;
	priv->cur_duplex = phydev->duplex;
	priv->rx_pause = rx_pause;
	priv->tx_pause = tx_pause;

	/* Link is down, do nothing else */
	if (!new_speed)
		return;

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Reset the adapter asynchronously */
	schedule_work(&priv->reset_task);
}
/* Find the first PHY on our MDIO bus and connect to it using the
 * given interface mode (@intf, from the device-tree or the RGMII
 * default).
 *
 * Returns 0 on success, -ENODEV when no PHY is present, or the
 * phy_connect() error.
 */
static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
{
	struct net_device *netdev = priv->netdev;
	struct phy_device *phydev;

	/* Only a single PHY is expected on this bus */
	phydev = phy_find_first(priv->mii_bus);
	if (!phydev) {
		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &ftgmac100_adjust_link, intf);

	if (IS_ERR(phydev)) {
		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
		return PTR_ERR(phydev);
	}

	/* Indicate that we support PAUSE frames (see comment in
	 * Documentation/networking/phy.txt)
	 */
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->advertising = phydev->supported;

	/* Display what we found */
	phy_attached_info(phydev);

	return 0;
}
/* MDIO bus read accessor.
 *
 * Kicks off a MII read cycle via the PHYCR register and polls up to
 * 10 x 100us for the hardware to clear the MIIRD bit, then returns the
 * 16-bit value from PHYDATA. Returns -EIO on timeout.
 */
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIRD;

	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		/* Hardware clears MIIRD when the transaction completes */
		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
			int data;

			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
			return FTGMAC100_PHYDATA_MIIRDATA(data);
		}

		udelay(100);
	}

	netdev_err(netdev, "mdio read timed out\n");
	return -EIO;
}
/* MDIO bus write accessor.
 *
 * Loads the value into PHYDATA, starts a MII write cycle via PHYCR and
 * polls up to 10 x 100us for the hardware to clear the MIIWR bit.
 * Returns 0 on success or -EIO on timeout.
 */
static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
				   int regnum, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int data;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIWR;

	data = FTGMAC100_PHYDATA_MIIWDATA(value);

	/* The data register must be set up before triggering the cycle */
	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		/* Hardware clears MIIWR when the transaction completes */
		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
			return 0;

		udelay(100);
	}

	netdev_err(netdev, "mdio write timed out\n");
	return -EIO;
}
  946. static void ftgmac100_get_drvinfo(struct net_device *netdev,
  947. struct ethtool_drvinfo *info)
  948. {
  949. strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
  950. strlcpy(info->version, DRV_VERSION, sizeof(info->version));
  951. strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
  952. }
  953. static void ftgmac100_get_ringparam(struct net_device *netdev,
  954. struct ethtool_ringparam *ering)
  955. {
  956. struct ftgmac100 *priv = netdev_priv(netdev);
  957. memset(ering, 0, sizeof(*ering));
  958. ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
  959. ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
  960. ering->rx_pending = priv->rx_q_entries;
  961. ering->tx_pending = priv->tx_q_entries;
  962. }
  963. static int ftgmac100_set_ringparam(struct net_device *netdev,
  964. struct ethtool_ringparam *ering)
  965. {
  966. struct ftgmac100 *priv = netdev_priv(netdev);
  967. if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
  968. ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
  969. ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
  970. ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
  971. !is_power_of_2(ering->rx_pending) ||
  972. !is_power_of_2(ering->tx_pending))
  973. return -EINVAL;
  974. priv->new_rx_q_entries = ering->rx_pending;
  975. priv->new_tx_q_entries = ering->tx_pending;
  976. if (netif_running(netdev))
  977. schedule_work(&priv->reset_task);
  978. return 0;
  979. }
  980. static void ftgmac100_get_pauseparam(struct net_device *netdev,
  981. struct ethtool_pauseparam *pause)
  982. {
  983. struct ftgmac100 *priv = netdev_priv(netdev);
  984. pause->autoneg = priv->aneg_pause;
  985. pause->tx_pause = priv->tx_pause;
  986. pause->rx_pause = priv->rx_pause;
  987. }
/* ethtool -A: update the pause frame configuration.
 *
 * The new settings are folded into the PHY advertisement (when a PHY
 * is attached) and either a re-autonegotiation or a direct MAC
 * reconfiguration is triggered if the interface is running.
 */
static int ftgmac100_set_pauseparam(struct net_device *netdev,
				    struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	priv->aneg_pause = pause->autoneg;
	priv->tx_pause = pause->tx_pause;
	priv->rx_pause = pause->rx_pause;

	if (phydev) {
		/* Standard 802.3 pause advertisement encoding: rx sets
		 * Pause+Asym, then the XOR on Asym_Pause expresses the
		 * asymmetric (tx-only / rx-only) combinations. See
		 * Documentation/networking/phy.txt.
		 */
		phydev->advertising &= ~ADVERTISED_Pause;
		phydev->advertising &= ~ADVERTISED_Asym_Pause;

		if (pause->rx_pause) {
			phydev->advertising |= ADVERTISED_Pause;
			phydev->advertising |= ADVERTISED_Asym_Pause;
		}

		if (pause->tx_pause)
			phydev->advertising ^= ADVERTISED_Asym_Pause;
	}
	if (netif_running(netdev)) {
		if (phydev && priv->aneg_pause)
			phy_start_aneg(phydev);
		else
			ftgmac100_config_pause(priv);
	}

	return 0;
}
/* ethtool entry points; link settings and nway reset are delegated to
 * the generic phylib helpers (they no-op without an attached PHY).
 */
static const struct ethtool_ops ftgmac100_ethtool_ops = {
	.get_drvinfo = ftgmac100_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.nway_reset = phy_ethtool_nway_reset,
	.get_ringparam = ftgmac100_get_ringparam,
	.set_ringparam = ftgmac100_set_ringparam,
	.get_pauseparam = ftgmac100_get_pauseparam,
	.set_pauseparam = ftgmac100_set_pauseparam,
};
/* Hard interrupt handler.
 *
 * Reads and acknowledges the interrupt status, accounts "abnormal"
 * events (masking them until the restart performed by NAPI or the
 * reset worker), then kicks NAPI for the RX/TX work.
 */
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int status, new_mask = FTGMAC100_INT_BAD;

	/* Fetch and clear interrupt bits, process abnormal ones */
	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
	if (unlikely(status & FTGMAC100_INT_BAD)) {

		/* RX buffer unavailable */
		if (status & FTGMAC100_INT_NO_RXBUF)
			netdev->stats.rx_over_errors++;

		/* received packet lost due to RX FIFO full */
		if (status & FTGMAC100_INT_RPKT_LOST)
			netdev->stats.rx_fifo_errors++;

		/* sent packet lost due to excessive TX collision */
		if (status & FTGMAC100_INT_XPKT_LOST)
			netdev->stats.tx_fifo_errors++;

		/* AHB error -> Reset the chip */
		if (status & FTGMAC100_INT_AHB_ERR) {
			if (net_ratelimit())
				netdev_warn(netdev,
					    "AHB bus error ! Resetting chip.\n");
			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
			schedule_work(&priv->reset_task);
			return IRQ_HANDLED;
		}

		/* We may need to restart the MAC after such errors, delay
		 * this until after we have freed some Rx buffers though
		 */
		priv->need_mac_restart = true;

		/* Disable those errors until we restart */
		new_mask &= ~status;
	}

	/* Only enable "bad" interrupts while NAPI is on */
	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);

	/* Schedule NAPI bh */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}
  1065. static bool ftgmac100_check_rx(struct ftgmac100 *priv)
  1066. {
  1067. struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
  1068. /* Do we have a packet ? */
  1069. return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
  1070. }
  1071. static int ftgmac100_poll(struct napi_struct *napi, int budget)
  1072. {
  1073. struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
  1074. int work_done = 0;
  1075. bool more;
  1076. /* Handle TX completions */
  1077. if (ftgmac100_tx_buf_cleanable(priv))
  1078. ftgmac100_tx_complete(priv);
  1079. /* Handle RX packets */
  1080. do {
  1081. more = ftgmac100_rx_packet(priv, &work_done);
  1082. } while (more && work_done < budget);
  1083. /* The interrupt is telling us to kick the MAC back to life
  1084. * after an RX overflow
  1085. */
  1086. if (unlikely(priv->need_mac_restart)) {
  1087. ftgmac100_start_hw(priv);
  1088. /* Re-enable "bad" interrupts */
  1089. iowrite32(FTGMAC100_INT_BAD,
  1090. priv->base + FTGMAC100_OFFSET_IER);
  1091. }
  1092. /* As long as we are waiting for transmit packets to be
  1093. * completed we keep NAPI going
  1094. */
  1095. if (ftgmac100_tx_buf_cleanable(priv))
  1096. work_done = budget;
  1097. if (work_done < budget) {
  1098. /* We are about to re-enable all interrupts. However
  1099. * the HW has been latching RX/TX packet interrupts while
  1100. * they were masked. So we clear them first, then we need
  1101. * to re-check if there's something to process
  1102. */
  1103. iowrite32(FTGMAC100_INT_RXTX,
  1104. priv->base + FTGMAC100_OFFSET_ISR);
  1105. /* Push the above (and provides a barrier vs. subsequent
  1106. * reads of the descriptor).
  1107. */
  1108. ioread32(priv->base + FTGMAC100_OFFSET_ISR);
  1109. /* Check RX and TX descriptors for more work to do */
  1110. if (ftgmac100_check_rx(priv) ||
  1111. ftgmac100_tx_buf_cleanable(priv))
  1112. return budget;
  1113. /* deschedule NAPI */
  1114. napi_complete(napi);
  1115. /* enable all interrupts */
  1116. iowrite32(FTGMAC100_INT_ALL,
  1117. priv->base + FTGMAC100_OFFSET_IER);
  1118. }
  1119. return work_done;
  1120. }
/* Bring rings, hardware and software state up (open and reset paths).
 *
 * @ignore_alloc_err: when true (reset path), an RX buffer allocation
 * failure is tolerated — descriptors without a buffer keep pointing at
 * the scratch area and packets landing there are lost, but the device
 * stays alive.
 *
 * Returns the RX allocation error (or 0).
 */
static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_config_pause(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}
/* Deferred chip reset (scheduled from the IRQ handler, link changes,
 * TX timeouts and ethtool ring changes).
 *
 * Lock order is rtnl -> phy -> mdio and must not change: it keeps us
 * serialized against open/stop, PHY state changes and MDIO accesses.
 */
static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);

	/* Check if the interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
 bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}
/* ndo_open: allocate rings, reset the MAC, grab the interrupt, start
 * the hardware and either the PHY state machine or the NC-SI stack.
 *
 * Error unwinding happens in strict reverse order of setup; the labels
 * below must stay matched to the setup steps above them.
 */
static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
	 */
	if (priv->use_ncsi) {
		priv->cur_duplex = DUPLEX_FULL;
		priv->cur_speed = SPEED_100;
	} else {
		priv->cur_duplex = 0;
		priv->cur_speed = 0;
	}

	/* Reset the hardware */
	err = ftgmac100_reset_and_config_mac(priv);
	if (err)
		goto err_hw;

	/* Initialize NAPI */
	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);

	/* Grab our interrupt */
	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
		goto err_irq;
	}

	/* Start things up */
	err = ftgmac100_init_all(priv, false);
	if (err) {
		netdev_err(netdev, "Failed to allocate packet buffers\n");
		goto err_alloc;
	}

	if (netdev->phydev) {
		/* If we have a PHY, start polling */
		phy_start(netdev->phydev);
	} else if (priv->use_ncsi) {
		/* If using NC-SI, set our carrier on and start the stack */
		netif_carrier_on(netdev);

		/* Start the NCSI device */
		err = ncsi_start_dev(priv->ndev);
		if (err)
			goto err_ncsi;
	}

	return 0;

 err_ncsi:
	napi_disable(&priv->napi);
	netif_stop_queue(netdev);
 err_alloc:
	ftgmac100_free_buffers(priv);
	free_irq(netdev->irq, netdev);
 err_irq:
	netif_napi_del(&priv->napi);
 err_hw:
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	ftgmac100_free_rings(priv);
	return err;
}
/* ndo_stop: quiesce the hardware and free everything ndo_open set up */
static int ftgmac100_stop(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Note about the reset task: We are called with the rtnl lock
	 * held, so we are synchronized against the core of the reset
	 * task. We must not try to synchronously cancel it otherwise
	 * we can deadlock. But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't need
	 * to do anything special.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	else if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}
  1271. /* optional */
  1272. static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  1273. {
  1274. if (!netdev->phydev)
  1275. return -ENXIO;
  1276. return phy_mii_ioctl(netdev->phydev, ifr, cmd);
  1277. }
/* ndo_tx_timeout: the stack believes the TX path is stuck. Mask the
 * chip's interrupts and let the reset worker rebuild everything from
 * process context.
 */
static void ftgmac100_tx_timeout(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}
/* ndo_set_features: apply changed offload bits to live hardware.
 *
 * Only RX VLAN stripping requires an immediate register update; other
 * features are picked up from netdev->features when the hardware is
 * (re)initialized.
 */
static int ftgmac100_set_features(struct net_device *netdev,
				  netdev_features_t features)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;

	/* If down, the new bits take effect on the next open */
	if (!netif_running(netdev))
		return 0;

	/* Update the vlan filtering bit */
	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		u32 maccr;

		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
		/* NOTE(review): this tests netdev->features, which the net
		 * core only updates after ndo_set_features returns, so it
		 * reads the OLD setting — looks like it should test
		 * 'features'; confirm against __netdev_update_features().
		 */
		if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
			maccr |= FTGMAC100_MACCR_RM_VLAN;
		else
			maccr &= ~FTGMAC100_MACCR_RM_VLAN;
		iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler with local interrupts
 * masked so netconsole & co. can make progress without real IRQs.
 */
static void ftgmac100_poll_controller(struct net_device *netdev)
{
	unsigned long flags;

	local_irq_save(flags);
	ftgmac100_interrupt(netdev->irq, netdev);
	local_irq_restore(flags);
}
#endif
/* Netdev entry points for this driver */
static const struct net_device_ops ftgmac100_netdev_ops = {
	.ndo_open = ftgmac100_open,
	.ndo_stop = ftgmac100_stop,
	.ndo_start_xmit = ftgmac100_hard_start_xmit,
	.ndo_set_mac_address = ftgmac100_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = ftgmac100_do_ioctl,
	.ndo_tx_timeout = ftgmac100_tx_timeout,
	.ndo_set_rx_mode = ftgmac100_set_rx_mode,
	.ndo_set_features = ftgmac100_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ftgmac100_poll_controller,
#endif
};
  1328. static int ftgmac100_setup_mdio(struct net_device *netdev)
  1329. {
  1330. struct ftgmac100 *priv = netdev_priv(netdev);
  1331. struct platform_device *pdev = to_platform_device(priv->dev);
  1332. int phy_intf = PHY_INTERFACE_MODE_RGMII;
  1333. struct device_node *np = pdev->dev.of_node;
  1334. int i, err = 0;
  1335. u32 reg;
  1336. /* initialize mdio bus */
  1337. priv->mii_bus = mdiobus_alloc();
  1338. if (!priv->mii_bus)
  1339. return -EIO;
  1340. if (priv->is_aspeed) {
  1341. /* This driver supports the old MDIO interface */
  1342. reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
  1343. reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
  1344. iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
  1345. };
  1346. /* Get PHY mode from device-tree */
  1347. if (np) {
  1348. /* Default to RGMII. It's a gigabit part after all */
  1349. phy_intf = of_get_phy_mode(np);
  1350. if (phy_intf < 0)
  1351. phy_intf = PHY_INTERFACE_MODE_RGMII;
  1352. /* Aspeed only supports these. I don't know about other IP
  1353. * block vendors so I'm going to just let them through for
  1354. * now. Note that this is only a warning if for some obscure
  1355. * reason the DT really means to lie about it or it's a newer
  1356. * part we don't know about.
  1357. *
  1358. * On the Aspeed SoC there are additionally straps and SCU
  1359. * control bits that could tell us what the interface is
  1360. * (or allow us to configure it while the IP block is held
  1361. * in reset). For now I chose to keep this driver away from
  1362. * those SoC specific bits and assume the device-tree is
  1363. * right and the SCU has been configured properly by pinmux
  1364. * or the firmware.
  1365. */
  1366. if (priv->is_aspeed &&
  1367. phy_intf != PHY_INTERFACE_MODE_RMII &&
  1368. phy_intf != PHY_INTERFACE_MODE_RGMII &&
  1369. phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
  1370. phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
  1371. phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
  1372. netdev_warn(netdev,
  1373. "Unsupported PHY mode %s !\n",
  1374. phy_modes(phy_intf));
  1375. }
  1376. }
  1377. priv->mii_bus->name = "ftgmac100_mdio";
  1378. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
  1379. pdev->name, pdev->id);
  1380. priv->mii_bus->priv = priv->netdev;
  1381. priv->mii_bus->read = ftgmac100_mdiobus_read;
  1382. priv->mii_bus->write = ftgmac100_mdiobus_write;
  1383. for (i = 0; i < PHY_MAX_ADDR; i++)
  1384. priv->mii_bus->irq[i] = PHY_POLL;
  1385. err = mdiobus_register(priv->mii_bus);
  1386. if (err) {
  1387. dev_err(priv->dev, "Cannot register MDIO bus!\n");
  1388. goto err_register_mdiobus;
  1389. }
  1390. err = ftgmac100_mii_probe(priv, phy_intf);
  1391. if (err) {
  1392. dev_err(priv->dev, "MII Probe failed!\n");
  1393. goto err_mii_probe;
  1394. }
  1395. return 0;
  1396. err_mii_probe:
  1397. mdiobus_unregister(priv->mii_bus);
  1398. err_register_mdiobus:
  1399. mdiobus_free(priv->mii_bus);
  1400. return err;
  1401. }
  1402. static void ftgmac100_destroy_mdio(struct net_device *netdev)
  1403. {
  1404. struct ftgmac100 *priv = netdev_priv(netdev);
  1405. if (!netdev->phydev)
  1406. return;
  1407. phy_disconnect(netdev->phydev);
  1408. mdiobus_unregister(priv->mii_bus);
  1409. mdiobus_free(priv->mii_bus);
  1410. }
  1411. static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
  1412. {
  1413. if (unlikely(nd->state != ncsi_dev_state_functional))
  1414. return;
  1415. netdev_info(nd->dev, "NCSI interface %s\n",
  1416. nd->link_up ? "up" : "down");
  1417. }
  1418. static int ftgmac100_probe(struct platform_device *pdev)
  1419. {
  1420. struct resource *res;
  1421. int irq;
  1422. struct net_device *netdev;
  1423. struct ftgmac100 *priv;
  1424. struct device_node *np;
  1425. int err = 0;
  1426. if (!pdev)
  1427. return -ENODEV;
  1428. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1429. if (!res)
  1430. return -ENXIO;
  1431. irq = platform_get_irq(pdev, 0);
  1432. if (irq < 0)
  1433. return irq;
  1434. /* setup net_device */
  1435. netdev = alloc_etherdev(sizeof(*priv));
  1436. if (!netdev) {
  1437. err = -ENOMEM;
  1438. goto err_alloc_etherdev;
  1439. }
  1440. SET_NETDEV_DEV(netdev, &pdev->dev);
  1441. netdev->ethtool_ops = &ftgmac100_ethtool_ops;
  1442. netdev->netdev_ops = &ftgmac100_netdev_ops;
  1443. netdev->watchdog_timeo = 5 * HZ;
  1444. platform_set_drvdata(pdev, netdev);
  1445. /* setup private data */
  1446. priv = netdev_priv(netdev);
  1447. priv->netdev = netdev;
  1448. priv->dev = &pdev->dev;
  1449. INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
  1450. /* map io memory */
  1451. priv->res = request_mem_region(res->start, resource_size(res),
  1452. dev_name(&pdev->dev));
  1453. if (!priv->res) {
  1454. dev_err(&pdev->dev, "Could not reserve memory region\n");
  1455. err = -ENOMEM;
  1456. goto err_req_mem;
  1457. }
  1458. priv->base = ioremap(res->start, resource_size(res));
  1459. if (!priv->base) {
  1460. dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
  1461. err = -EIO;
  1462. goto err_ioremap;
  1463. }
  1464. netdev->irq = irq;
  1465. /* Enable pause */
  1466. priv->tx_pause = true;
  1467. priv->rx_pause = true;
  1468. priv->aneg_pause = true;
  1469. /* MAC address from chip or random one */
  1470. ftgmac100_initial_mac(priv);
  1471. np = pdev->dev.of_node;
  1472. if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
  1473. of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
  1474. priv->rxdes0_edorr_mask = BIT(30);
  1475. priv->txdes0_edotr_mask = BIT(30);
  1476. priv->is_aspeed = true;
  1477. } else {
  1478. priv->rxdes0_edorr_mask = BIT(15);
  1479. priv->txdes0_edotr_mask = BIT(15);
  1480. }
  1481. if (np && of_get_property(np, "use-ncsi", NULL)) {
  1482. if (!IS_ENABLED(CONFIG_NET_NCSI)) {
  1483. dev_err(&pdev->dev, "NCSI stack not enabled\n");
  1484. goto err_ncsi_dev;
  1485. }
  1486. dev_info(&pdev->dev, "Using NCSI interface\n");
  1487. priv->use_ncsi = true;
  1488. priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
  1489. if (!priv->ndev)
  1490. goto err_ncsi_dev;
  1491. } else {
  1492. priv->use_ncsi = false;
  1493. err = ftgmac100_setup_mdio(netdev);
  1494. if (err)
  1495. goto err_setup_mdio;
  1496. }
  1497. /* Default ring sizes */
  1498. priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
  1499. priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
  1500. /* Base feature set */
  1501. netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
  1502. NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
  1503. NETIF_F_HW_VLAN_CTAG_TX;
  1504. /* AST2400 doesn't have working HW checksum generation */
  1505. if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
  1506. netdev->hw_features &= ~NETIF_F_HW_CSUM;
  1507. if (np && of_get_property(np, "no-hw-checksum", NULL))
  1508. netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
  1509. netdev->features |= netdev->hw_features;
  1510. /* register network device */
  1511. err = register_netdev(netdev);
  1512. if (err) {
  1513. dev_err(&pdev->dev, "Failed to register netdev\n");
  1514. goto err_register_netdev;
  1515. }
  1516. netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
  1517. return 0;
  1518. err_ncsi_dev:
  1519. err_register_netdev:
  1520. ftgmac100_destroy_mdio(netdev);
  1521. err_setup_mdio:
  1522. iounmap(priv->base);
  1523. err_ioremap:
  1524. release_resource(priv->res);
  1525. err_req_mem:
  1526. netif_napi_del(&priv->napi);
  1527. free_netdev(netdev);
  1528. err_alloc_etherdev:
  1529. return err;
  1530. }
/* Platform remove: unregister the netdev and release all resources
 * acquired by probe, in reverse order.
 */
static int ftgmac100_remove(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct ftgmac100 *priv;

	netdev = platform_get_drvdata(pdev);
	priv = netdev_priv(netdev);

	unregister_netdev(netdev);

	/* There's a small chance the reset task will have been re-queued,
	 * during stop, make sure it's gone before we free the structure.
	 */
	cancel_work_sync(&priv->reset_task);

	ftgmac100_destroy_mdio(netdev);

	iounmap(priv->base);
	release_resource(priv->res);

	netif_napi_del(&priv->napi);
	free_netdev(netdev);
	return 0;
}
/* Device-tree match table and platform driver registration */
static const struct of_device_id ftgmac100_of_match[] = {
	{ .compatible = "faraday,ftgmac100" },
	{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);

static struct platform_driver ftgmac100_driver = {
	.probe = ftgmac100_probe,
	.remove = ftgmac100_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = ftgmac100_of_match,
	},
};
module_platform_driver(ftgmac100_driver);

MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");