xgbe-desc.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
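
/* Free a single ring: unmap and release every descriptor's attached data,
 * drop any Rx header/buffer pages still held by the ring, and free the
 * coherent descriptor memory itself.
 */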
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}
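
/* Free the Tx and Rx rings of every channel. */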
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}
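
/* Zeroed allocation that prefers the requested NUMA node, falling back to
 * any node if node-local memory is unavailable.
 */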
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}
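
/* Coherent DMA allocation with the same NUMA preference: point the device
 * at the requested node for the first attempt, then retry with the original
 * node setting if that attempt fails.
 */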
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}
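
/* Allocate a ring's hardware descriptor array (coherent DMA memory) and the
 * parallel xgbe_ring_data array used for software bookkeeping.
 */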
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}
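
/* Allocate descriptor memory for the Tx and Rx rings of every channel,
 * unwinding everything already allocated on the first failure.
 */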
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}
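
/* Allocate and DMA-map a block of pages for Rx buffers. The allocation order
 * is reduced until an allocation succeeds; if no node-local pages can be
 * found, pages from any node are accepted.
 */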
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order, ret;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	ret = dma_mapping_error(pdata->dev, pages_dma);
	if (ret) {
		put_page(pages);
		return ret;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}
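
/* Carve one Rx buffer of 'len' bytes out of the current page allocation. The
 * last buffer that fits in a page block takes over responsibility for
 * unmapping it (pa_unmap); the page-allocation state is then cleared so a
 * fresh block is allocated next time.
 */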
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}
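
/* Attach Rx buffers to a descriptor entry: a small header buffer and a
 * full-size data buffer, each carved from a shared page allocation that is
 * replenished on demand.
 */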
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}
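
/* Point each Tx ring entry at its hardware descriptor, reset the ring state,
 * and hand the ring to the hardware layer for descriptor initialization.
 */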
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}
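
/* Same as the Tx wrapper, but each Rx entry also has its header and data
 * buffers mapped before the hardware layer initializes the descriptors.
 */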
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
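
/* Release everything attached to a single descriptor entry: Tx DMA mappings,
 * the skb, Rx page references and mappings, and any saved receive state.
 */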
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}
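
/* Map an skb for transmission. The linear data (including the TSO header, if
 * any) and each fragment are mapped in XGBE_TX_MAX_BUF_SIZE chunks, one ring
 * entry per chunk, with an extra slot reserved when a context descriptor is
 * needed for a new MSS or VLAN tag. Returns the number of descriptor entries
 * used, or 0 after unwinding all mappings made so far on failure.
 */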
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}
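
/* Publish the descriptor interface used by the rest of the driver. */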
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}