dwc-xlgmac-net.c

/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);
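
/* Descriptor ring accounting helpers: 'cur' is the next descriptor the
 * driver will use and 'dirty' is the next descriptor to be cleaned, so
 * (cur - dirty) is the number of in-flight descriptors.
 */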
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}
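
/* Walk the skb and work out how many descriptors the packet will need:
 * an optional context descriptor for TSO/VLAN, plus one data descriptor
 * per XLGMAC_TX_MAX_BUF_SIZE chunk of the linear data and of each frag.
 */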
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}
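
/* Size the Rx buffer for the given MTU: room for the Ethernet header,
 * FCS and a VLAN tag, clamped to [XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE] and
 * rounded up to XLGMAC_RX_BUF_ALIGN.
 */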
static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}
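
/* Common (shared) interrupt handler. Scans the per-channel DMA status
 * bits, schedules NAPI when Tx/Rx work is pending and per-channel IRQs
 * are not in use, counts error conditions, and services MMC interrupts.
 */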
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}
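
/* Tx timer callback: disable the channel interrupts and schedule NAPI so
 * completed Tx descriptors get cleaned even without a further interrupt.
 */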
static void xlgmac_tx_timer(unsigned long data)
{
	struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xlgmac_tx_timer,
			    (unsigned long)channel);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
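
/* Request the device IRQ and, if per-channel interrupts are in use, one
 * DMA IRQ per channel. On failure, free any channel IRQs already taken.
 */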
static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}
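
/* Bring the device up: program the hardware, enable NAPI, request IRQs,
 * then enable the Tx/Rx paths and wake the transmit queues.
 */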
static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);

	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channles_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}
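
/* ndo_start_xmit handler: count the descriptors the skb needs, stop the
 * queue if the ring is too full, set up TSO/VLAN metadata, map the skb
 * and hand the descriptors to the hardware.
 */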
static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open		= xlgmac_open,
	.ndo_stop		= xlgmac_close,
	.ndo_start_xmit		= xlgmac_xmit,
	.ndo_tx_timeout		= xlgmac_tx_timeout,
	.ndo_get_stats64	= xlgmac_get_stats64,
	.ndo_change_mtu		= xlgmac_change_mtu,
	.ndo_set_mac_address	= xlgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xlgmac_ioctl,
	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xlgmac_poll_controller,
#endif
	.ndo_set_features	= xlgmac_set_features,
	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}
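
/* Re-arm Rx descriptors that NAPI has consumed: map a fresh buffer for
 * each dirty entry and advance the Rx tail pointer register.
 */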
static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}
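
/* Build an skb for a received packet: copy the header (or the whole
 * packet if it fits) into the linear area and attach any remaining
 * buffer data as a page fragment.
 */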
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}
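
/* Clean completed Tx descriptors for a channel: unmap buffers, update
 * BQL accounting and wake the queue if it was stopped for lack of
 * descriptors.
 */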
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}
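
/* Receive up to 'budget' packets from a channel's Rx ring. A packet may
 * span several descriptors (plus a context descriptor), so partial state
 * is saved in the descriptor data when the budget runs out mid-packet.
 */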
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;

	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}
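
/* NAPI poll callbacks: xlgmac_one_poll services a single channel when
 * per-channel interrupts are used, while xlgmac_all_poll walks every
 * channel and splits the budget across the Rx rings.
 */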
static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						      struct xlgmac_channel,
						      napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}

static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						  struct xlgmac_pdata,
						  napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}