xgbe-drv.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
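
/* Allocate one xgbe_channel structure per DMA channel, along with the
 * backing Tx and Rx ring structures. When per-channel interrupts are in
 * use, each channel's DMA interrupt is taken from the platform device
 * starting at offset 1 (offset 0 being the device-level interrupt).
 */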
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the DMA interrupt (offset 1) */
			ret = platform_get_irq(pdata->pdev, i + 1);
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "platform_get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}

			channel->dma_irq = ret;
		}

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
		      channel->name, channel->queue_index, channel->dma_regs,
		      channel->dma_irq, channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}
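
/* Number of free Tx descriptors: the ring is full when the distance
 * between the producer index (cur) and the cleanup index (dirty)
 * reaches rdesc_count.
 */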
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring,
				    unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
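
/* Size the Rx buffers from the MTU: add the Ethernet header, FCS and a
 * VLAN tag, clamp to [XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE], then round up
 * to the XGBE_RX_BUF_ALIGN boundary.
 */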
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->disable_int(channel, int_id);
	}
}
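
/* Device-level interrupt handler. DMA_ISR aggregates the per-channel,
 * MAC and MTL status, so one read determines whether any work is
 * pending. Tx/Rx completion work is deferred to NAPI, while MMC
 * statistics and Tx timestamp events are handled here.
 */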
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("  DMA_ISR = %08x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		/* If we get a TI or RI interrupt that means per channel DMA
		 * interrupts are not enabled, so we use the private data napi
		 * structure, not the per channel napi structure
		 */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				schedule_work(&pdata->tx_tstamp_work);
			}
		}
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

isr_done:
	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule(&channel->napi);
	}

	return IRQ_HANDLED;
}
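
/* Tx timer callback: schedule NAPI (disabling the relevant interrupts
 * first) so that pending Tx processing happens even if no further
 * completion interrupt arrives.
 */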
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;
	unsigned long flags;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	spin_lock_irqsave(&ring->lock, flags);

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}
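
/* Read the three hardware feature registers and cache what the device
 * reports: offload capabilities, FIFO sizes, and queue/channel counts.
 * Encoded fields (hash table size, zero-based counts) are translated
 * into actual values.
 */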
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* The Queue and Channel counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}
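
/* The NAPI layout mirrors the interrupt layout: one instance per
 * channel when per-channel interrupts are used, otherwise a single
 * instance for the whole device.
 */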
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}
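
/* PHY link-state callback: propagate negotiated flow control and speed
 * changes to the MAC and log link transitions.
 */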
static void xgbe_adjust_link(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct phy_device *phydev = pdata->phydev;
	int new_state = 0;

	if (phydev == NULL)
		return;

	if (phydev->link) {
		/* Flow control support */
		if (pdata->pause_autoneg) {
			if (phydev->pause || phydev->asym_pause) {
				pdata->tx_pause = 1;
				pdata->rx_pause = 1;
			} else {
				pdata->tx_pause = 0;
				pdata->rx_pause = 0;
			}
		}

		if (pdata->tx_pause != pdata->phy_tx_pause) {
			hw_if->config_tx_flow_control(pdata);
			pdata->phy_tx_pause = pdata->tx_pause;
		}

		if (pdata->rx_pause != pdata->phy_rx_pause) {
			hw_if->config_rx_flow_control(pdata);
			pdata->phy_rx_pause = pdata->rx_pause;
		}

		/* Speed support */
		if (phydev->speed != pdata->phy_speed) {
			new_state = 1;

			switch (phydev->speed) {
			case SPEED_10000:
				hw_if->set_xgmii_speed(pdata);
				break;

			case SPEED_2500:
				hw_if->set_gmii_2500_speed(pdata);
				break;

			case SPEED_1000:
				hw_if->set_gmii_speed(pdata);
				break;
			}
			pdata->phy_speed = phydev->speed;
		}

		if (phydev->link != pdata->phy_link) {
			new_state = 1;
			pdata->phy_link = 1;
		}
	} else if (pdata->phy_link) {
		new_state = 1;
		pdata->phy_link = 0;
		pdata->phy_speed = SPEED_UNKNOWN;
	}

	if (new_state)
		phy_print_status(phydev);
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct phy_device *phydev = pdata->phydev;
	int ret;

	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->phy_tx_pause = pdata->tx_pause;
	pdata->phy_rx_pause = pdata->rx_pause;

	ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
				 pdata->phy_mode);
	if (ret) {
		netdev_err(netdev, "phy_connect_direct failed\n");
		return ret;
	}

	if (!phydev->drv || (phydev->drv->phy_id == 0)) {
		netdev_err(netdev, "phy_id not valid\n");
		ret = -ENODEV;
		goto err_phy_connect;
	}
	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
	      dev_name(&phydev->dev), phydev->link);

	return 0;

err_phy_connect:
	phy_disconnect(phydev);

	return ret;
}

static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
	if (!pdata->phydev)
		return;

	phy_disconnect(pdata->phydev);
}
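
/* Power management entry points. The caller context matters: an ioctl
 * on a device already in the requested power state is rejected, and
 * only driver-context calls detach/attach the network device.
 */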
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	phy_stop(pdata->phydev);

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata, 0);

	/* Powerdown Tx/Rx */
	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	/* Enable Tx/Rx */
	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	xgbe_napi_enable(pdata, 0);
	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
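
/* Data path bring-up and tear-down: hardware init, PHY start/stop,
 * Tx/Rx engine enables, Tx timers, NAPI and the netdev Tx queues.
 */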
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	xgbe_napi_enable(pdata, 1);

	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	phy_stop(pdata->phydev);

	netif_tx_stop_all_queues(netdev);

	xgbe_napi_disable(pdata, 1);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
	struct xgbe_channel *channel;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int i;

	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);
	synchronize_irq(pdata->dev_irq);
	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			synchronize_irq(channel->dma_irq);
	}

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	/* Issue software reset to device if requested */
	if (reset)
		hw_if->exit(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata, 1);

	rtnl_unlock();
}
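
/* Deferred Tx timestamp work: convert the raw device timestamp to
 * nanoseconds via the timecounter and deliver it with the saved skb.
 */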
static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}
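
/* Per-packet Tx preparation helpers: claim the single outstanding
 * hardware timestamp slot, record the VLAN tag, and compute the TSO
 * header and payload lengths.
 */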
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (vlan_tx_tag_present(skb))
		packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
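
/* Count the descriptors a packet will need: an optional context
 * descriptor when the MSS or VLAN tag changes, one for the TSO header,
 * and one per XGBE_TX_MAX_BUF_SIZE chunk of the linear data and of
 * each fragment.
 */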
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (vlan_tx_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
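
/* ndo_open: initialize the PHY, enable the clocks, size the Rx
 * buffers, allocate channels and rings, and request interrupts before
 * starting the device. The error path unwinds in reverse order.
 */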
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel = NULL;
	unsigned int i = 0;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Initialize the phy */
	ret = xgbe_phy_init(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_phy_init;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the device restart and Tx timestamp work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	/* Request interrupts */
	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		goto err_rings;
	}

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			snprintf(channel->dma_irq_name,
				 sizeof(channel->dma_irq_name) - 1,
				 "%s-TxRx-%u", netdev_name(netdev),
				 channel->queue_index);

			ret = devm_request_irq(pdata->dev, channel->dma_irq,
					       xgbe_dma_isr, 0,
					       channel->dma_irq_name, channel);
			if (ret) {
				netdev_alert(netdev,
					     "error requesting irq %d\n",
					     channel->dma_irq);
				goto err_irq;
			}
		}
	}

	ret = xgbe_start(pdata);
	if (ret)
		goto err_start;

	DBGPR("<--xgbe_open\n");

	return 0;

err_start:
	hw_if->exit(pdata);

err_irq:
	if (pdata->per_channel_irq) {
		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
		for (i--, channel--; i < pdata->channel_count; i--, channel--)
			devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_phy_init:
	xgbe_phy_exit(pdata);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Release the interrupts */
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	/* Release the phy */
	xgbe_phy_exit(pdata);

	DBGPR("<--xgbe_close\n");

	return 0;
}
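
/* ndo_start_xmit: compute the descriptor count (stopping the queue if
 * too few are free), prepare TSO/VLAN/timestamp state, map the skb and
 * hand the descriptors to the hardware via dev_xmit.
 */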
  1154. static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
  1155. {
  1156. struct xgbe_prv_data *pdata = netdev_priv(netdev);
  1157. struct xgbe_hw_if *hw_if = &pdata->hw_if;
  1158. struct xgbe_desc_if *desc_if = &pdata->desc_if;
  1159. struct xgbe_channel *channel;
  1160. struct xgbe_ring *ring;
  1161. struct xgbe_packet_data *packet;
  1162. struct netdev_queue *txq;
  1163. unsigned long flags;
  1164. int ret;
  1165. DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
  1166. channel = pdata->channel + skb->queue_mapping;
  1167. txq = netdev_get_tx_queue(netdev, channel->queue_index);
  1168. ring = channel->tx_ring;
  1169. packet = &ring->packet_data;
  1170. ret = NETDEV_TX_OK;
  1171. spin_lock_irqsave(&ring->lock, flags);
  1172. if (skb->len == 0) {
  1173. netdev_err(netdev, "empty skb received from stack\n");
  1174. dev_kfree_skb_any(skb);
  1175. goto tx_netdev_return;
  1176. }
  1177. /* Calculate preliminary packet info */
  1178. memset(packet, 0, sizeof(*packet));
  1179. xgbe_packet_info(pdata, ring, skb, packet);
  1180. /* Check that there are enough descriptors available */
  1181. ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
  1182. if (ret)
  1183. goto tx_netdev_return;
  1184. ret = xgbe_prep_tso(skb, packet);
  1185. if (ret) {
  1186. netdev_err(netdev, "error processing TSO packet\n");
  1187. dev_kfree_skb_any(skb);
  1188. goto tx_netdev_return;
  1189. }
  1190. xgbe_prep_vlan(skb, packet);
  1191. if (!desc_if->map_tx_skb(channel, skb)) {
  1192. dev_kfree_skb_any(skb);
  1193. goto tx_netdev_return;
  1194. }
  1195. xgbe_prep_tx_tstamp(pdata, skb, packet);
  1196. /* Report on the actual number of bytes (to be) sent */
  1197. netdev_tx_sent_queue(txq, packet->tx_bytes);
  1198. /* Configure required descriptor fields for transmission */
  1199. hw_if->dev_xmit(channel);
  1200. #ifdef XGMAC_ENABLE_TX_PKT_DUMP
  1201. xgbe_print_pkt(netdev, skb, true);
  1202. #endif
  1203. /* Stop the queue in advance if there may not be enough descriptors */
  1204. xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
  1205. ret = NETDEV_TX_OK;
  1206. tx_netdev_return:
  1207. spin_unlock_irqrestore(&ring->lock, flags);
  1208. DBGPR("<--xgbe_xmit\n");
  1209. return ret;
  1210. }
  1211. static void xgbe_set_rx_mode(struct net_device *netdev)
  1212. {
  1213. struct xgbe_prv_data *pdata = netdev_priv(netdev);
  1214. struct xgbe_hw_if *hw_if = &pdata->hw_if;
  1215. unsigned int pr_mode, am_mode;
  1216. DBGPR("-->xgbe_set_rx_mode\n");
  1217. pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
  1218. am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
  1219. hw_if->set_promiscuous_mode(pdata, pr_mode);
  1220. hw_if->set_all_multicast_mode(pdata, am_mode);
  1221. hw_if->add_mac_addresses(pdata);
  1222. DBGPR("<--xgbe_set_rx_mode\n");
  1223. }
  1224. static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
  1225. {
  1226. struct xgbe_prv_data *pdata = netdev_priv(netdev);
  1227. struct xgbe_hw_if *hw_if = &pdata->hw_if;
  1228. struct sockaddr *saddr = addr;
  1229. DBGPR("-->xgbe_set_mac_address\n");
  1230. if (!is_valid_ether_addr(saddr->sa_data))
  1231. return -EADDRNOTAVAIL;
  1232. memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
  1233. hw_if->set_mac_address(pdata, netdev->dev_addr);
  1234. DBGPR("<--xgbe_set_mac_address\n");
  1235. return 0;
  1236. }
static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

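/* ndo_change_mtu: the Rx buffer size is derived from the MTU, so
 * validate and recompute it first, then restart the device to apply
 * the new size.
 */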
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata, 0);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

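/* ndo_get_stats64: refresh the hardware MMC counters and derive the
 * error counts from them (Rx errors: all frames minus the good
 * unicast/multicast/broadcast counts; Tx errors: all frames minus
 * good frames).
 */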
static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

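/* VLAN filter offload: active VIDs are tracked in a bitmap, and the
 * hardware VLAN hash table is recomputed from it on every change.
 */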
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

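/* netpoll entry point: with per-channel interrupts the channel ISRs
 * are invoked directly; otherwise the shared ISR is run with the
 * device interrupt disabled to avoid racing the real handler.
 */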
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

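/* ndo_setup_tc: only the hardware's own traffic class count
 * (hw_feat.tc_cnt) is accepted. Each contiguous run of Tx queues
 * mapped to the same class in q2tc_map becomes one tc queue range.
 */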
static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int offset, queue;
	u8 i;

	if (tc && (tc != pdata->hw_feat.tc_cnt))
		return -EINVAL;

	if (tc) {
		netdev_set_num_tc(netdev, tc);

		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
			while ((queue < pdata->tx_q_count) &&
			       (pdata->q2tc_map[queue] == i))
				queue++;

			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			offset = queue;
		}
	} else {
		netdev_reset_tc(netdev);
	}

	return 0;
}

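/* Diff the requested features against the last value programmed into
 * the hardware (pdata->netdev_features) and touch only the RSS, Rx
 * checksum, VLAN stripping and VLAN filtering blocks that changed.
 */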
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

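/* Callbacks registered with the network stack */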
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};

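/* Accessor so code outside this file can reach the static ops table.
 * Presumed use at the probe-time call site (a sketch, not verified
 * here):
 *
 *	netdev->netdev_ops = xgbe_get_netdev_ops();
 */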
struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}

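/* Replenish consumed Rx buffers and advance the Rx tail pointer so
 * the DMA engine sees the newly writable descriptors.
 */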
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	desc_if->realloc_rx_buffer(channel);

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

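/* Start an skb for a received packet: the (split) header, or up to
 * *len bytes when no header split occurred, is copied into the linear
 * area. *len is reduced by the amount consumed; the caller attaches
 * any remainder as page fragments.
 */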
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct xgbe_ring_data *rdata,
				       unsigned int *len)
{
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	u8 *packet;
	unsigned int copy_len;

	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	*len -= copy_len;

	return skb;
}

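/* Reclaim completed Tx descriptors under the ring lock. "dirty"
 * chases "cur": each descriptor whose OWN bit has cleared is unmapped
 * and reset, up to XGBE_TX_DESC_MAX_PROC per call, and a stopped
 * queue is woken once enough descriptors are free again.
 */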
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned long flags;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	spin_lock_irqsave(&ring->lock, flags);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != ring->cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		rmb();

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		goto unlock;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return processed;
}

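/* NAPI Rx processing. A packet can span several descriptors
 * (INCOMPLETE) and may be followed by a context descriptor, typically
 * carrying the Rx timestamp (CONTEXT_NEXT). If the budget runs out
 * mid-packet, the partial skb and flags are parked in the ring data
 * (state_saved) and restored on the next poll.
 */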
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, put_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			incomplete = rdata->state.incomplete;
			context_next = rdata->state.context_next;
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			incomplete = 0;
			context_next = 0;
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;
		ring->dirty++;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			put_len = rdata->rx.len - len;
			len += put_len;

			if (!skb) {
				dma_sync_single_for_cpu(pdata->dev,
							rdata->rx.hdr.dma,
							rdata->rx.hdr.dma_len,
							DMA_FROM_DEVICE);

				skb = xgbe_create_skb(pdata, rdata, &put_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (put_len) {
				dma_sync_single_for_cpu(pdata->dev,
							rdata->rx.buf.dma,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						put_len, rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, napi);
		netdev->last_rx = jiffies;

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.incomplete = incomplete;
		rdata->state.context_next = context_next;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

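/* NAPI poll for per-channel interrupt mode: each channel has its own
 * napi instance, so only this channel's DMA interrupt is re-enabled
 * when polling completes.
 */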
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

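/* NAPI poll for the shared-interrupt case: the budget is split evenly
 * across the Rx rings and the channels are swept repeatedly until the
 * budget is exhausted or a full pass makes no progress.
 */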
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}

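/* Debug helpers: raw dumps of Tx/Rx descriptor words and packet
 * contents. Example invocation, as used from the dump hook above:
 *
 *	xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
 */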
void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}

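/* Hex-dump an skb: header summary first, then the payload at 32 bytes
 * per line, spaced every 4 bytes with a wider gap every 16.
 */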
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32) {
		/* Terminate the partial line: group separators appended
		 * after the last snprintf() call would otherwise leave the
		 * buffer unterminated, printing stale bytes from an
		 * earlier line.
		 */
		buffer[j] = '\0';
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
	}

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}