xgbe-drv.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
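
/* Number of free descriptors in the Tx ring (total minus those in flight) */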
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}
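
/* Compute the Rx buffer size for a given MTU: add Ethernet header, FCS and
 * VLAN overhead, clamp to the minimum buffer size and round up to the Rx
 * buffer alignment.  Returns -EINVAL if the MTU exceeds the jumbo limit.
 */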
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->enable_int(channel,
					  XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->enable_int(channel,
					  XGMAC_INT_DMA_CH_SR_RI);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->disable_int(channel,
					   XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->disable_int(channel,
					   XGMAC_INT_DMA_CH_SR_RI);
	}
}
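
/* Device interrupt handler.  Per-channel Tx/Rx interrupts are turned off and
 * handed to NAPI, MMC counter interrupts are serviced immediately, and a
 * fatal bus error schedules the restart work.
 */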
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("-->xgbe_isr\n");

	DBGPR("  DMA_ISR = %08x\n", dma_isr);
	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

	DBGPR("<--xgbe_isr\n");

isr_done:
	return IRQ_HANDLED;
}
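
/* Tx coalescing timer callback: when the timer fires, kick NAPI so any
 * pending Tx completions are processed even if no interrupt was raised.
 */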
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned long flags;

	DBGPR("-->xgbe_tx_timer\n");

	spin_lock_irqsave(&ring->lock, flags);

	if (napi_schedule_prep(&pdata->napi)) {
		/* Disable Tx and Rx interrupts */
		xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(&pdata->napi);
	}

	channel->tx_timer_active = 0;

	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}
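
/* Read the MAC hardware feature registers (MAC_HWF0R..MAC_HWF2R) and cache
 * the capabilities the device advertises, converting encoded fields (hash
 * table size, queue/channel counts) into usable numbers.
 */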
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					   ADDMACADRSEL);
	hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       RXFIFOSIZE);
	hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       TXFIFOSIZE);
	hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* The Queue and Channel counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	if (add)
		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
			       NAPI_POLL_WEIGHT);
	napi_enable(&pdata->napi);
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
{
	napi_disable(&pdata->napi);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_skbuff\n");
}

static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_skbuff\n");
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	phy_stop(pdata->phydev);

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	/* Powerdown Tx/Rx */
	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	/* Enable Tx/Rx */
	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	xgbe_napi_enable(pdata, 0);
	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	xgbe_napi_enable(pdata, 1);
	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_stop\n");

	phy_stop(pdata->phydev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);
	synchronize_irq(pdata->irq_number);

	xgbe_free_tx_skbuff(pdata);
	xgbe_free_rx_skbuff(pdata);

	/* Issue software reset to device if requested */
	if (reset)
		hw_if->exit(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata, 1);

	rtnl_unlock();
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (vlan_tx_tag_present(skb))
		packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
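
/* Pre-compute what the skb will need from the Tx ring: the number of
 * descriptors (including context descriptors for a new MSS or VLAN tag)
 * and the TSO/checksum/VLAN attributes used later by the hardware setup.
 */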
static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (vlan_tx_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Enable the clock */
	ret = clk_prepare_enable(pdata->sysclock);
	if (ret) {
		netdev_alert(netdev, "clk_prepare_enable failed\n");
		return ret;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_clk;
	pdata->rx_buf_size = ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_clk;

	/* Initialize the device restart work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);

	/* Request interrupts */
	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->irq_number);
		goto err_irq;
	}
	pdata->irq_number = netdev->irq;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_start;

	DBGPR("<--xgbe_open\n");

	return 0;

err_start:
	hw_if->exit(pdata);

	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
	pdata->irq_number = 0;

err_irq:
	desc_if->free_ring_resources(pdata);

err_clk:
	clk_disable_unprepare(pdata->sysclock);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Free all the ring data */
	desc_if->free_ring_resources(pdata);

	/* Release the interrupt */
	if (pdata->irq_number != 0) {
		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
		pdata->irq_number = 0;
	}

	/* Disable the clock */
	clk_disable_unprepare(pdata->sysclock);

	DBGPR("<--xgbe_close\n");

	return 0;
}
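
/* ndo_start_xmit handler: size up the packet, make sure enough descriptors
 * are free (stopping the subqueue otherwise), prepare TSO/VLAN state, map
 * the skb and hand the descriptors to the hardware.
 */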
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	unsigned long flags;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	spin_lock_irqsave(&ring->lock, flags);

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(ring, skb, packet);

	/* Check that there are enough descriptors available */
	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;
		ret = NETDEV_TX_BUSY;
		goto tx_netdev_return;
	}

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Configure required descriptor fields for transmission */
	hw_if->pre_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

tx_netdev_return:
	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_xmit\n");

	return ret;
}
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);
	hw_if->add_mac_addresses(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata, 0);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_poll_controller\n");

	disable_irq(pdata->irq_number);
	xgbe_isr(pdata->irq_number, pdata);
	enable_irq(pdata->irq_number);

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rxcsum, rxvlan, rxvlan_filter;

	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_set_features	= xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}
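
/* Reclaim completed Tx descriptors for a channel: free the associated skbs,
 * reset the descriptors for re-use and wake the subqueue if it was stopped
 * and enough descriptors are free again.
 */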
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	unsigned long flags;
	int processed = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	spin_lock_irqsave(&ring->lock, flags);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty < ring->cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_skb(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_wake_subqueue(netdev, channel->queue_index);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	spin_unlock_irqrestore(&ring->lock, flags);

	return processed;
}
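
/* Receive packets for a channel, up to the NAPI budget: assemble possibly
 * multi-descriptor packets into an skb, drop errored or oversized frames,
 * apply checksum/VLAN offload results and pass the skb up via GRO.  The
 * Rx tail pointer is updated after buffers have been reallocated.
 */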
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	unsigned int incomplete, error;
	unsigned int cur_len, put_len, max_len;
	int received = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	packet = &ring->packet_data;
	while (received < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* Clear the packet data information */
		memset(packet, 0, sizeof(*packet));
		skb = NULL;
		error = 0;
		cur_len = 0;

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;
		ring->dirty++;

		dma_unmap_single(pdata->dev, rdata->skb_dma,
				 rdata->skb_dma_len, DMA_FROM_DEVICE);
		rdata->skb_dma = 0;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);

		/* Earlier error, just drain the remaining data */
		if (incomplete && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			continue;
		}

		put_len = rdata->len - cur_len;
		if (skb) {
			if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
				DBGPR("pskb_expand_head error\n");
				if (incomplete) {
					error = 1;
					goto read_again;
				}

				dev_kfree_skb(skb);
				continue;
			}
			memcpy(skb_tail_pointer(skb), rdata->skb->data,
			       put_len);
		} else {
			skb = rdata->skb;
			rdata->skb = NULL;
		}
		skb_put(skb, put_len);
		cur_len += put_len;

		if (incomplete)
			goto read_again;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			continue;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, &pdata->napi);
		netdev->last_rx = jiffies;

		napi_gro_receive(&pdata->napi, skb);
	}

	if (received) {
		desc_if->realloc_skb(channel);

		/* Update the Rx Tail Pointer Register with address of
		 * the last cleaned entry */
		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
				  lower_32_bits(rdata->rdesc_dma));
	}

	DBGPR("<--xgbe_rx_poll: received = %d\n", received);

	return received;
}
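
/* NAPI poll routine: clean all Tx rings first, then process the Rx rings
 * against the budget.  When less than the full budget is used, polling is
 * turned off and the per-channel Tx/Rx interrupts are re-enabled.
 */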
static int xgbe_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int processed;
	unsigned int i;

	DBGPR("-->xgbe_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = 0;
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		processed += xgbe_rx_poll(channel, budget - processed);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_poll: received = %d\n", processed);

	return processed;
}
void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}

	if (i % 32)
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}