  1. /* Broadcom NetXtreme-C/E network driver.
  2. *
  3. * Copyright (c) 2016-2017 Broadcom Limited
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation.
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/errno.h>
  11. #include <linux/pci.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/etherdevice.h>
  14. #include <linux/if_vlan.h>
  15. #include <linux/bpf.h>
  16. #include <linux/bpf_trace.h>
  17. #include <linux/filter.h>
  18. #include "bnxt_hsi.h"
  19. #include "bnxt.h"
  20. #include "bnxt_xdp.h"
  21. void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
  22. dma_addr_t mapping, u32 len, u16 rx_prod)
  23. {
  24. struct bnxt_sw_tx_bd *tx_buf;
  25. struct tx_bd *txbd;
  26. u32 flags;
  27. u16 prod;
  28. prod = txr->tx_prod;
  29. tx_buf = &txr->tx_buf_ring[prod];
  30. tx_buf->rx_prod = rx_prod;
  31. txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
  32. flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
  33. TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
  34. txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
  35. txbd->tx_bd_opaque = prod;
  36. txbd->tx_bd_haddr = cpu_to_le64(mapping);
  37. prod = NEXT_TX(prod);
  38. txr->tx_prod = prod;
  39. }
/* TX completion handler for the XDP TX ring.
 *
 * Advances the TX consumer index past @nr_pkts completed packets, then
 * rings the RX doorbell.  Each transmitted BD was a recycled RX buffer
 * whose RX producer update was deferred (see bnxt_rx_xdp()); it can only
 * be published once the corresponding TX BD has completed.
 */
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	u16 rx_prod;
	int i;

	/* Step past the completed BDs, remembering the last one consumed. */
	for (i = 0; i < nr_pkts; i++) {
		last_tx_cons = tx_cons;
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
		/* TX ring fully drained: every recycled RX buffer has
		 * completed, so the current RX producer is safe to publish.
		 */
		rx_prod = rxr->rx_prod;
	} else {
		/* BDs still in flight: only publish up to the RX producer
		 * saved in the last completed TX BD by bnxt_xmit_xdp().
		 */
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		rx_prod = tx_buf->rx_prod;
	}
	bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
}
/* Run the attached XDP program, if any, on a received packet.
 *
 * returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	/* No program attached: fast path, hand the packet to the stack. */
	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	txr = rxr->bnapi->tx_ring;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	/* Build the xdp_buff over the received page; headroom below
	 * *data_ptr is available for XDP head adjustment.
	 */
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp.data_end = *data_ptr + *len;
	orig_data = xdp.data;
	mapping = rx_buf->mapping - bp->rx_dma_offset;

	/* Make the packet bytes visible to the CPU before the program runs. */
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	/* The program may have moved xdp.data (head adjustment); propagate
	 * the new start and length back to the caller.
	 */
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
		*len = xdp.data_end - xdp.data;
	}

	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		/* Need at least one free TX BD to transmit the packet. */
		if (tx_avail < 1) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		/* Hand the (possibly modified) bytes back to the device. */
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
			      NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;

	default:
		bpf_warn_invalid_xdp_action(act);
		/* Fall thru */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* Fall thru */
	case XDP_DROP:
		/* Drop: recycle the buffer back onto the RX ring. */
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
  133. /* Under rtnl_lock */
/* Attach or detach an XDP program (NULL @prog detaches).
 *
 * Validates that the configuration supports XDP (page-mode MTU limit,
 * combined rx/tx channels), reserves dedicated XDP TX rings, swaps the
 * program pointer, and reconfigures ring/RX-buffer parameters, bouncing
 * the NIC if it is running.  Returns 0 or a negative errno.
 *
 * Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	/* XDP uses single-page RX buffers, which caps the supported MTU. */
	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}

	/* One dedicated XDP TX ring per RX ring when a program is attached. */
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	/* Verify the device can supply the extra TX rings before committing. */
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}

	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	/* Swap in the new program; drop our reference to the old one. */
	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		/* Switch RX to single-page buffer mode for XDP. */
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			/* Re-enable aggregation rings and LRO now that
			 * page mode is no longer required.
			 */
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}

	/* Recompute ring counts to include (or exclude) the XDP TX rings. */
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
  185. int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
  186. {
  187. struct bnxt *bp = netdev_priv(dev);
  188. int rc;
  189. switch (xdp->command) {
  190. case XDP_SETUP_PROG:
  191. rc = bnxt_xdp_set(bp, xdp->prog);
  192. break;
  193. case XDP_QUERY_PROG:
  194. xdp->prog_attached = !!bp->xdp_prog;
  195. xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
  196. rc = 0;
  197. break;
  198. default:
  199. rc = -EINVAL;
  200. break;
  201. }
  202. return rc;
  203. }