xgbe-drv.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
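
/* ring->cur and ring->dirty appear to be free-running indices that are only
 * reduced to a ring slot when passed through GET_DESC_DATA, so the number of
 * in-flight descriptors is simply (cur - dirty) and the remainder of the
 * ring is still available for new frames.
 */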
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}
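
/* An Rx buffer must hold the largest frame the stack can hand us: the MTU
 * plus the Ethernet header (14), FCS (4) and one VLAN tag (4), clamped to
 * RX_MIN_BUF_SIZE and rounded up to the buffer alignment.  As a worked
 * example, a 1500 byte MTU gives 1500 + 14 + 4 + 4 = 1522 bytes which,
 * assuming RX_BUF_ALIGN is 64, rounds up to 1536.
 */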
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	if (rx_buf_size < RX_MIN_BUF_SIZE)
		rx_buf_size = RX_MIN_BUF_SIZE;
	rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->enable_int(channel,
					  XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->enable_int(channel,
					  XGMAC_INT_DMA_CH_SR_RI);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->disable_int(channel,
					   XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->disable_int(channel,
					   XGMAC_INT_DMA_CH_SR_RI);
	}
}
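
/* A single interrupt line services all of the DMA channels as well as the
 * MAC/MTL blocks.  No Tx/Rx completion work is done in the handler itself:
 * the per-channel TI/RI sources are masked and processing is deferred to
 * NAPI (xgbe_poll), which re-enables them once the poll budget is no longer
 * exhausted.
 */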
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("-->xgbe_isr\n");

	DBGPR(" DMA_ISR = %08x\n", dma_isr);
	DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
	DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);
	}

	DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

	DBGPR("<--xgbe_isr\n");

isr_done:
	return IRQ_HANDLED;
}
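
/* The per-channel Tx timer appears to act as a fallback completion trigger
 * while Tx interrupts are being coalesced: if it fires before a Tx interrupt
 * does, it kicks the same NAPI path so completed descriptors are still
 * cleaned up promptly.  (tx_timer_active is presumably armed by the transmit
 * path elsewhere in the driver.)
 */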
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned long flags;

	DBGPR("-->xgbe_tx_timer\n");

	spin_lock_irqsave(&ring->lock, flags);

	if (napi_schedule_prep(&pdata->napi)) {
		/* Disable Tx and Rx interrupts */
		xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(&pdata->napi);
	}

	channel->tx_timer_active = 0;

	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR(" %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR(" %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}
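
/* Snapshot the hardware capability registers (MAC_HWF0R/1R/2R).  The
 * resulting hw_feat fields gate runtime decisions elsewhere in the driver;
 * for example, addn_mac bounds the number of additional MAC address filter
 * entries consulted by xgbe_set_rx_mode() below.
 */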
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					   ADDMACADRSEL);
	hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       RXFIFOSIZE);
	hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       TXFIFOSIZE);
	hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* The Queue and Channel counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	if (add)
		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
			       NAPI_POLL_WEIGHT);
	napi_enable(&pdata->napi);
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
{
	napi_disable(&pdata->napi);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_skbuff\n");
}

static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_skbuff\n");
}
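
/* Power down/up paths.  A caller in XGMAC_DRIVER_CONTEXT (presumably the
 * suspend/resume hooks) additionally detaches/attaches the net_device,
 * while an XGMAC_IOCTL_CONTEXT caller is only honoured when it actually
 * changes the power_down state of a running device.
 */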
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	phy_stop(pdata->phydev);

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	/* Powerdown Tx/Rx */
	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	/* Enable Tx/Rx */
	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	xgbe_napi_enable(pdata, 0);
	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	xgbe_napi_enable(pdata, 1);
	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_stop\n");

	phy_stop(pdata->phydev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);
	synchronize_irq(pdata->irq_number);

	xgbe_free_tx_skbuff(pdata);
	xgbe_free_rx_skbuff(pdata);

	/* Issue software reset to device if requested */
	if (reset)
		hw_if->exit(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata, 1);

	rtnl_unlock();
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (vlan_tx_tag_present(skb))
		packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR(" packet->header_len=%u\n", packet->header_len);
	DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR(" packet->mss=%u\n", packet->mss);

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR(" TSO packet to be processed\n");

	return 1;
}
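
/* Pre-compute how many descriptors a frame will consume before anything is
 * mapped, so xgbe_xmit() can stop the queue up front instead of failing part
 * way through.  As a worked example: a TSO skb whose MSS differs from the
 * last one used on this ring needs one context descriptor, one descriptor
 * for the TSO header, one per TX_MAX_BUF_SIZE chunk of the linear data and
 * one per chunk of each page fragment.
 */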
static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (vlan_tx_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
		}
	}
}

static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Enable the clock */
	ret = clk_prepare_enable(pdata->sysclock);
	if (ret) {
		netdev_alert(netdev, "clk_prepare_enable failed\n");
		return ret;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_clk;
	pdata->rx_buf_size = ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_clk;

	/* Initialize the device restart work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);

	/* Request interrupts */
	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     netdev->irq);
		goto err_irq;
	}
	pdata->irq_number = netdev->irq;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_start;

	DBGPR("<--xgbe_open\n");

	return 0;

err_start:
	hw_if->exit(pdata);

	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
	pdata->irq_number = 0;

err_irq:
	desc_if->free_ring_resources(pdata);

err_clk:
	clk_disable_unprepare(pdata->sysclock);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Free all the ring data */
	desc_if->free_ring_resources(pdata);

	/* Release the interrupt */
	if (pdata->irq_number != 0) {
		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
		pdata->irq_number = 0;
	}

	/* Disable the clock */
	clk_disable_unprepare(pdata->sysclock);

	DBGPR("<--xgbe_close\n");

	return 0;
}

static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	unsigned long flags;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	spin_lock_irqsave(&ring->lock, flags);

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(ring, skb, packet);

	/* Check that there are enough descriptors available */
	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
		DBGPR(" Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;
		ret = NETDEV_TX_BUSY;
		goto tx_netdev_return;
	}

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Configure required descriptor fields for transmission */
	hw_if->pre_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

tx_netdev_return:
	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_xmit\n");

	return ret;
}
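
/* Program the Rx filters.  If the combined unicast and multicast address
 * lists no longer fit within the additional MAC address registers reported
 * by hw_feat.addn_mac, the code simply falls back to promiscuous and/or
 * all-multicast mode instead of per-address filtering.
 */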
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
		pr_mode = 1;
	if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
		am_mode = 1;
	if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
	    pdata->hw_feat.addn_mac)
		pr_mode = 1;

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);
	if (!pr_mode)
		hw_if->set_addn_mac_addrs(pdata, am_mode);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata, 0);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_poll_controller\n");

	disable_irq(pdata->irq_number);

	xgbe_isr(pdata->irq_number, pdata);

	enable_irq(pdata->irq_number);

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rxcsum_enabled, rxvlan_enabled;

	rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
	rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);

	if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
		hw_if->enable_rx_csum(pdata);
		netdev_alert(netdev, "state change - rxcsum enabled\n");
	} else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
		hw_if->disable_rx_csum(pdata);
		netdev_alert(netdev, "state change - rxcsum disabled\n");
	}

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
		hw_if->enable_rx_vlan_stripping(pdata);
		netdev_alert(netdev, "state change - rxvlan enabled\n");
	} else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
		hw_if->disable_rx_vlan_stripping(pdata);
		netdev_alert(netdev, "state change - rxvlan disabled\n");
	}

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open = xgbe_open,
	.ndo_stop = xgbe_close,
	.ndo_start_xmit = xgbe_xmit,
	.ndo_set_rx_mode = xgbe_set_rx_mode,
	.ndo_set_mac_address = xgbe_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = xgbe_change_mtu,
	.ndo_get_stats64 = xgbe_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgbe_poll_controller,
#endif
	.ndo_set_features = xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}
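
/* Reclaim completed Tx descriptors for one channel.  At most
 * TX_DESC_MAX_PROC entries are handled per call, and the subqueue is only
 * woken once xgbe_tx_avail_desc() reports more than TX_DESC_MIN_FREE free
 * slots, which keeps the queue from bouncing between stopped and started.
 */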
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	unsigned long flags;
	int processed = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	spin_lock_irqsave(&ring->lock, flags);

	while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
		rdata = GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_skb(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_wake_subqueue(netdev, channel->queue_index);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	spin_unlock_irqrestore(&ring->lock, flags);

	return processed;
}
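
/* Receive processing for one channel.  A frame that spans several
 * descriptors is flagged INCOMPLETE by the hardware; the loop keeps reading
 * descriptors (the read_again label) and appending their data to the same
 * skb, only handing the packet to the stack once the final segment has been
 * consumed, or dropping it if any segment reported an error.
 */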
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	unsigned int incomplete, error;
	unsigned int cur_len, put_len, max_len;
	int received = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	packet = &ring->packet_data;
	while (received < budget) {
		DBGPR(" cur = %d\n", ring->cur);

		/* Clear the packet data information */
		memset(packet, 0, sizeof(*packet));
		skb = NULL;
		error = 0;
		cur_len = 0;

read_again:
		rdata = GET_DESC_DATA(ring, ring->cur);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;
		ring->dirty++;

		dma_unmap_single(pdata->dev, rdata->skb_dma,
				 rdata->skb_dma_len, DMA_FROM_DEVICE);
		rdata->skb_dma = 0;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);

		/* Earlier error, just drain the remaining data */
		if (incomplete && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			continue;
		}

		put_len = rdata->len - cur_len;
		if (skb) {
			if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
				DBGPR("pskb_expand_head error\n");
				if (incomplete) {
					error = 1;
					goto read_again;
				}

				dev_kfree_skb(skb);
				continue;
			}
			memcpy(skb_tail_pointer(skb), rdata->skb->data,
			       put_len);
		} else {
			skb = rdata->skb;
			rdata->skb = NULL;
		}
		skb_put(skb, put_len);
		cur_len += put_len;

		if (incomplete)
			goto read_again;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			continue;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, &pdata->napi);
		netdev->last_rx = jiffies;

		napi_gro_receive(&pdata->napi, skb);
	}

	if (received) {
		desc_if->realloc_skb(channel);

		/* Update the Rx Tail Pointer Register with address of
		 * the last cleaned entry */
		rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
				  lower_32_bits(rdata->rdesc_dma));
	}

	DBGPR("<--xgbe_rx_poll: received = %d\n", received);

	return received;
}
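
/* NAPI handler: Tx completions are cleaned on every channel first (they do
 * not count against the budget), then the Rx budget is shared across the
 * channels.  Interrupts are only re-enabled, via xgbe_enable_rx_tx_ints(),
 * when the budget was not exhausted.
 */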
static int xgbe_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int processed;
	unsigned int i;

	DBGPR("-->xgbe_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = 0;
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		processed += xgbe_rx_poll(channel, budget - processed);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_poll: received = %d\n", processed);

	return processed;
}

void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}

	if (i % 32)
		netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}