bnxt_xdp.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
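
/* Queue one XDP_TX packet on the XDP TX ring. Each packet consumes two
 * BDs (a long TX BD plus its extension BD); the current RX producer is
 * stashed in the tx_buf entry so the RX doorbell can be rung once the
 * transmit completes.
 */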
static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd_ext *txbd1;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->rx_prod = rx_prod;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		(2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
	txbd1->tx_bd_mss = cpu_to_le32(0);
	txbd1->tx_bd_cfa_action = cpu_to_le32(0);
	txbd1->tx_bd_cfa_meta = cpu_to_le32(0);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
}
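
/* Reclaim completed XDP TX BDs and ring the RX doorbell. Every XDP
 * packet occupies two TX BDs, so the consumer index advances twice per
 * completed packet. When the ring has fully drained, the latest RX
 * producer is used; otherwise the producer saved in the last completed
 * tx_buf is used so buffers still queued for transmit are not recycled.
 */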
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	u16 rx_prod;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		last_tx_cons = tx_cons;
		tx_cons = NEXT_TX(tx_cons);
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
		rx_prod = rxr->rx_prod;
	} else {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		rx_prod = tx_buf->rx_prod;
	}
	writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	txr = rxr->bnapi->tx_ring;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp.data_end = *data_ptr + *len;
	orig_data = xdp.data;
	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;
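
	/* The program may have moved the start of the packet (e.g. with
	 * bpf_xdp_adjust_head()); pick up the new data pointer and length.
	 */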
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
		*len = xdp.data_end - xdp.data;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		if (tx_avail < 2) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
			      NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* Fall thru */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* Fall thru */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
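
/* Install or remove an XDP program. Attaching requires page-mode RX
 * buffers (so the MTU must fit in a single page) and combined RX/TX
 * channels, and reserves one dedicated XDP TX ring per RX ring. The NIC
 * is closed and reopened around the switch if it is running.
 */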
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
				tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
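
/* ndo_xdp entry point: dispatch XDP program setup and query commands. */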
int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!bp->xdp_prog;
		rc = 0;
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}