/* xdpsock_user.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>
#include "bpf_load.h"
#include "bpf_util.h"
#include "libbpf.h"
#include "xdpsock.h"
  41. #ifndef SOL_XDP
  42. #define SOL_XDP 283
  43. #endif
  44. #ifndef AF_XDP
  45. #define AF_XDP 44
  46. #endif
  47. #ifndef PF_XDP
  48. #define PF_XDP AF_XDP
  49. #endif
  50. #define NUM_FRAMES 131072
  51. #define FRAME_HEADROOM 0
  52. #define FRAME_SIZE 2048
  53. #define NUM_DESCS 1024
  54. #define BATCH_SIZE 16
  55. #define FQ_NUM_DESCS 1024
  56. #define CQ_NUM_DESCS 1024
  57. #define DEBUG_HEXDUMP 0
  58. typedef __u32 u32;
  59. static unsigned long prev_time;
  60. enum benchmark_type {
  61. BENCH_RXDROP = 0,
  62. BENCH_TXONLY = 1,
  63. BENCH_L2FWD = 2,
  64. };
  65. static enum benchmark_type opt_bench = BENCH_RXDROP;
  66. static u32 opt_xdp_flags;
  67. static const char *opt_if = "";
  68. static int opt_ifindex;
  69. static int opt_queue;
  70. static int opt_poll;
  71. static int opt_shared_packet_buffer;
  72. static int opt_interval = 1;
  73. struct xdp_umem_uqueue {
  74. u32 cached_prod;
  75. u32 cached_cons;
  76. u32 mask;
  77. u32 size;
  78. struct xdp_umem_ring *ring;
  79. };
  80. struct xdp_umem {
  81. char (*frames)[FRAME_SIZE];
  82. struct xdp_umem_uqueue fq;
  83. struct xdp_umem_uqueue cq;
  84. int fd;
  85. };
  86. struct xdp_uqueue {
  87. u32 cached_prod;
  88. u32 cached_cons;
  89. u32 mask;
  90. u32 size;
  91. struct xdp_rxtx_ring *ring;
  92. };
  93. struct xdpsock {
  94. struct xdp_uqueue rx;
  95. struct xdp_uqueue tx;
  96. int sfd;
  97. struct xdp_umem *umem;
  98. u32 outstanding_tx;
  99. unsigned long rx_npkts;
  100. unsigned long tx_npkts;
  101. unsigned long prev_rx_npkts;
  102. unsigned long prev_tx_npkts;
  103. };
  104. #define MAX_SOCKS 4
  105. static int num_socks;
  106. struct xdpsock *xsks[MAX_SOCKS];
  107. static unsigned long get_nsecs(void)
  108. {
  109. struct timespec ts;
  110. clock_gettime(CLOCK_MONOTONIC, &ts);
  111. return ts.tv_sec * 1000000000UL + ts.tv_nsec;
  112. }
  113. static void dump_stats(void);
  114. #define lassert(expr) \
  115. do { \
  116. if (!(expr)) { \
  117. fprintf(stderr, "%s:%s:%i: Assertion failed: " \
  118. #expr ": errno: %d/\"%s\"\n", \
  119. __FILE__, __func__, __LINE__, \
  120. errno, strerror(errno)); \
  121. dump_stats(); \
  122. exit(EXIT_FAILURE); \
  123. } \
  124. } while (0)
  125. #define barrier() __asm__ __volatile__("": : :"memory")
  126. #define u_smp_rmb() barrier()
  127. #define u_smp_wmb() barrier()
  128. #define likely(x) __builtin_expect(!!(x), 1)
  129. #define unlikely(x) __builtin_expect(!!(x), 0)
  130. static const char pkt_data[] =
  131. "\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
  132. "\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
  133. "\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
  134. "\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
  135. static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
  136. {
  137. u32 free_entries = q->size - (q->cached_prod - q->cached_cons);
  138. if (free_entries >= nb)
  139. return free_entries;
  140. /* Refresh the local tail pointer */
  141. q->cached_cons = q->ring->ptrs.consumer;
  142. return q->size - (q->cached_prod - q->cached_cons);
  143. }
  144. static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
  145. {
  146. u32 free_entries = q->cached_cons - q->cached_prod;
  147. if (free_entries >= ndescs)
  148. return free_entries;
  149. /* Refresh the local tail pointer */
  150. q->cached_cons = q->ring->ptrs.consumer + q->size;
  151. return q->cached_cons - q->cached_prod;
  152. }
  153. static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
  154. {
  155. u32 entries = q->cached_prod - q->cached_cons;
  156. if (entries == 0) {
  157. q->cached_prod = q->ring->ptrs.producer;
  158. entries = q->cached_prod - q->cached_cons;
  159. }
  160. return (entries > nb) ? nb : entries;
  161. }
  162. static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
  163. {
  164. u32 entries = q->cached_prod - q->cached_cons;
  165. if (entries == 0) {
  166. q->cached_prod = q->ring->ptrs.producer;
  167. entries = q->cached_prod - q->cached_cons;
  168. }
  169. return (entries > ndescs) ? ndescs : entries;
  170. }
  171. static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
  172. struct xdp_desc *d,
  173. size_t nb)
  174. {
  175. u32 i;
  176. if (umem_nb_free(fq, nb) < nb)
  177. return -ENOSPC;
  178. for (i = 0; i < nb; i++) {
  179. u32 idx = fq->cached_prod++ & fq->mask;
  180. fq->ring->desc[idx] = d[i].idx;
  181. }
  182. u_smp_wmb();
  183. fq->ring->ptrs.producer = fq->cached_prod;
  184. return 0;
  185. }
  186. static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d,
  187. size_t nb)
  188. {
  189. u32 i;
  190. if (umem_nb_free(fq, nb) < nb)
  191. return -ENOSPC;
  192. for (i = 0; i < nb; i++) {
  193. u32 idx = fq->cached_prod++ & fq->mask;
  194. fq->ring->desc[idx] = d[i];
  195. }
  196. u_smp_wmb();
  197. fq->ring->ptrs.producer = fq->cached_prod;
  198. return 0;
  199. }
  200. static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
  201. u32 *d, size_t nb)
  202. {
  203. u32 idx, i, entries = umem_nb_avail(cq, nb);
  204. u_smp_rmb();
  205. for (i = 0; i < entries; i++) {
  206. idx = cq->cached_cons++ & cq->mask;
  207. d[i] = cq->ring->desc[idx];
  208. }
  209. if (entries > 0) {
  210. u_smp_wmb();
  211. cq->ring->ptrs.consumer = cq->cached_cons;
  212. }
  213. return entries;
  214. }
  215. static inline void *xq_get_data(struct xdpsock *xsk, __u32 idx, __u32 off)
  216. {
  217. lassert(idx < NUM_FRAMES);
  218. return &xsk->umem->frames[idx][off];
  219. }
  220. static inline int xq_enq(struct xdp_uqueue *uq,
  221. const struct xdp_desc *descs,
  222. unsigned int ndescs)
  223. {
  224. struct xdp_rxtx_ring *r = uq->ring;
  225. unsigned int i;
  226. if (xq_nb_free(uq, ndescs) < ndescs)
  227. return -ENOSPC;
  228. for (i = 0; i < ndescs; i++) {
  229. u32 idx = uq->cached_prod++ & uq->mask;
  230. r->desc[idx].idx = descs[i].idx;
  231. r->desc[idx].len = descs[i].len;
  232. r->desc[idx].offset = descs[i].offset;
  233. }
  234. u_smp_wmb();
  235. r->ptrs.producer = uq->cached_prod;
  236. return 0;
  237. }
  238. static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
  239. __u32 idx, unsigned int ndescs)
  240. {
  241. struct xdp_rxtx_ring *q = uq->ring;
  242. unsigned int i;
  243. if (xq_nb_free(uq, ndescs) < ndescs)
  244. return -ENOSPC;
  245. for (i = 0; i < ndescs; i++) {
  246. u32 idx = uq->cached_prod++ & uq->mask;
  247. q->desc[idx].idx = idx + i;
  248. q->desc[idx].len = sizeof(pkt_data) - 1;
  249. q->desc[idx].offset = 0;
  250. }
  251. u_smp_wmb();
  252. q->ptrs.producer = uq->cached_prod;
  253. return 0;
  254. }
  255. static inline int xq_deq(struct xdp_uqueue *uq,
  256. struct xdp_desc *descs,
  257. int ndescs)
  258. {
  259. struct xdp_rxtx_ring *r = uq->ring;
  260. unsigned int idx;
  261. int i, entries;
  262. entries = xq_nb_avail(uq, ndescs);
  263. u_smp_rmb();
  264. for (i = 0; i < entries; i++) {
  265. idx = uq->cached_cons++ & uq->mask;
  266. descs[i] = r->desc[idx];
  267. }
  268. if (entries > 0) {
  269. u_smp_wmb();
  270. r->ptrs.consumer = uq->cached_cons;
  271. }
  272. return entries;
  273. }
  274. static void swap_mac_addresses(void *data)
  275. {
  276. struct ether_header *eth = (struct ether_header *)data;
  277. struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
  278. struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
  279. struct ether_addr tmp;
  280. tmp = *src_addr;
  281. *src_addr = *dst_addr;
  282. *dst_addr = tmp;
  283. }
  284. #if DEBUG_HEXDUMP
  285. static void hex_dump(void *pkt, size_t length, const char *prefix)
  286. {
  287. int i = 0;
  288. const unsigned char *address = (unsigned char *)pkt;
  289. const unsigned char *line = address;
  290. size_t line_size = 32;
  291. unsigned char c;
  292. printf("length = %zu\n", length);
  293. printf("%s | ", prefix);
  294. while (length-- > 0) {
  295. printf("%02X ", *address++);
  296. if (!(++i % line_size) || (length == 0 && i % line_size)) {
  297. if (length == 0) {
  298. while (i++ % line_size)
  299. printf("__ ");
  300. }
  301. printf(" | "); /* right close */
  302. while (line < address) {
  303. c = *line++;
  304. printf("%c", (c < 33 || c == 255) ? 0x2E : c);
  305. }
  306. printf("\n");
  307. if (length > 0)
  308. printf("%s | ", prefix);
  309. }
  310. }
  311. printf("\n");
  312. }
  313. #endif
  314. static size_t gen_eth_frame(char *frame)
  315. {
  316. memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
  317. return sizeof(pkt_data) - 1;
  318. }
  319. static struct xdp_umem *xdp_umem_configure(int sfd)
  320. {
  321. int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
  322. struct xdp_umem_reg mr;
  323. struct xdp_umem *umem;
  324. void *bufs;
  325. umem = calloc(1, sizeof(*umem));
  326. lassert(umem);
  327. lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
  328. NUM_FRAMES * FRAME_SIZE) == 0);
  329. mr.addr = (__u64)bufs;
  330. mr.len = NUM_FRAMES * FRAME_SIZE;
  331. mr.frame_size = FRAME_SIZE;
  332. mr.frame_headroom = FRAME_HEADROOM;
  333. lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
  334. lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
  335. sizeof(int)) == 0);
  336. lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
  337. sizeof(int)) == 0);
  338. umem->fq.ring = mmap(0, sizeof(struct xdp_umem_ring) +
  339. FQ_NUM_DESCS * sizeof(u32),
  340. PROT_READ | PROT_WRITE,
  341. MAP_SHARED | MAP_POPULATE, sfd,
  342. XDP_UMEM_PGOFF_FILL_RING);
  343. lassert(umem->fq.ring != MAP_FAILED);
  344. umem->fq.mask = FQ_NUM_DESCS - 1;
  345. umem->fq.size = FQ_NUM_DESCS;
  346. umem->cq.ring = mmap(0, sizeof(struct xdp_umem_ring) +
  347. CQ_NUM_DESCS * sizeof(u32),
  348. PROT_READ | PROT_WRITE,
  349. MAP_SHARED | MAP_POPULATE, sfd,
  350. XDP_UMEM_PGOFF_COMPLETION_RING);
  351. lassert(umem->cq.ring != MAP_FAILED);
  352. umem->cq.mask = CQ_NUM_DESCS - 1;
  353. umem->cq.size = CQ_NUM_DESCS;
  354. umem->frames = (char (*)[FRAME_SIZE])bufs;
  355. umem->fd = sfd;
  356. if (opt_bench == BENCH_TXONLY) {
  357. int i;
  358. for (i = 0; i < NUM_FRAMES; i++)
  359. (void)gen_eth_frame(&umem->frames[i][0]);
  360. }
  361. return umem;
  362. }
  363. static struct xdpsock *xsk_configure(struct xdp_umem *umem)
  364. {
  365. struct sockaddr_xdp sxdp = {};
  366. int sfd, ndescs = NUM_DESCS;
  367. struct xdpsock *xsk;
  368. bool shared = true;
  369. u32 i;
  370. sfd = socket(PF_XDP, SOCK_RAW, 0);
  371. lassert(sfd >= 0);
  372. xsk = calloc(1, sizeof(*xsk));
  373. lassert(xsk);
  374. xsk->sfd = sfd;
  375. xsk->outstanding_tx = 0;
  376. if (!umem) {
  377. shared = false;
  378. xsk->umem = xdp_umem_configure(sfd);
  379. } else {
  380. xsk->umem = umem;
  381. }
  382. lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
  383. &ndescs, sizeof(int)) == 0);
  384. lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
  385. &ndescs, sizeof(int)) == 0);
  386. /* Rx */
  387. xsk->rx.ring = mmap(NULL,
  388. sizeof(struct xdp_ring) +
  389. NUM_DESCS * sizeof(struct xdp_desc),
  390. PROT_READ | PROT_WRITE,
  391. MAP_SHARED | MAP_POPULATE, sfd,
  392. XDP_PGOFF_RX_RING);
  393. lassert(xsk->rx.ring != MAP_FAILED);
  394. if (!shared) {
  395. for (i = 0; i < NUM_DESCS / 2; i++)
  396. lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
  397. == 0);
  398. }
  399. /* Tx */
  400. xsk->tx.ring = mmap(NULL,
  401. sizeof(struct xdp_ring) +
  402. NUM_DESCS * sizeof(struct xdp_desc),
  403. PROT_READ | PROT_WRITE,
  404. MAP_SHARED | MAP_POPULATE, sfd,
  405. XDP_PGOFF_TX_RING);
  406. lassert(xsk->tx.ring != MAP_FAILED);
  407. xsk->rx.mask = NUM_DESCS - 1;
  408. xsk->rx.size = NUM_DESCS;
  409. xsk->tx.mask = NUM_DESCS - 1;
  410. xsk->tx.size = NUM_DESCS;
  411. sxdp.sxdp_family = PF_XDP;
  412. sxdp.sxdp_ifindex = opt_ifindex;
  413. sxdp.sxdp_queue_id = opt_queue;
  414. if (shared) {
  415. sxdp.sxdp_flags = XDP_SHARED_UMEM;
  416. sxdp.sxdp_shared_umem_fd = umem->fd;
  417. }
  418. lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);
  419. return xsk;
  420. }
  421. static void print_benchmark(bool running)
  422. {
  423. const char *bench_str = "INVALID";
  424. if (opt_bench == BENCH_RXDROP)
  425. bench_str = "rxdrop";
  426. else if (opt_bench == BENCH_TXONLY)
  427. bench_str = "txonly";
  428. else if (opt_bench == BENCH_L2FWD)
  429. bench_str = "l2fwd";
  430. printf("%s:%d %s ", opt_if, opt_queue, bench_str);
  431. if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
  432. printf("xdp-skb ");
  433. else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
  434. printf("xdp-drv ");
  435. else
  436. printf(" ");
  437. if (opt_poll)
  438. printf("poll() ");
  439. if (running) {
  440. printf("running...");
  441. fflush(stdout);
  442. }
  443. }
  444. static void dump_stats(void)
  445. {
  446. unsigned long now = get_nsecs();
  447. long dt = now - prev_time;
  448. int i;
  449. prev_time = now;
  450. for (i = 0; i < num_socks; i++) {
  451. char *fmt = "%-15s %'-11.0f %'-11lu\n";
  452. double rx_pps, tx_pps;
  453. rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
  454. 1000000000. / dt;
  455. tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
  456. 1000000000. / dt;
  457. printf("\n sock%d@", i);
  458. print_benchmark(false);
  459. printf("\n");
  460. printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
  461. dt / 1000000000.);
  462. printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
  463. printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);
  464. xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
  465. xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
  466. }
  467. }
  468. static void *poller(void *arg)
  469. {
  470. (void)arg;
  471. for (;;) {
  472. sleep(opt_interval);
  473. dump_stats();
  474. }
  475. return NULL;
  476. }
  477. static void int_exit(int sig)
  478. {
  479. (void)sig;
  480. dump_stats();
  481. bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
  482. exit(EXIT_SUCCESS);
  483. }
  484. static struct option long_options[] = {
  485. {"rxdrop", no_argument, 0, 'r'},
  486. {"txonly", no_argument, 0, 't'},
  487. {"l2fwd", no_argument, 0, 'l'},
  488. {"interface", required_argument, 0, 'i'},
  489. {"queue", required_argument, 0, 'q'},
  490. {"poll", no_argument, 0, 'p'},
  491. {"shared-buffer", no_argument, 0, 's'},
  492. {"xdp-skb", no_argument, 0, 'S'},
  493. {"xdp-native", no_argument, 0, 'N'},
  494. {"interval", required_argument, 0, 'n'},
  495. {0, 0, 0, 0}
  496. };
  497. static void usage(const char *prog)
  498. {
  499. const char *str =
  500. " Usage: %s [OPTIONS]\n"
  501. " Options:\n"
  502. " -r, --rxdrop Discard all incoming packets (default)\n"
  503. " -t, --txonly Only send packets\n"
  504. " -l, --l2fwd MAC swap L2 forwarding\n"
  505. " -i, --interface=n Run on interface n\n"
  506. " -q, --queue=n Use queue n (default 0)\n"
  507. " -p, --poll Use poll syscall\n"
  508. " -s, --shared-buffer Use shared packet buffer\n"
  509. " -S, --xdp-skb=n Use XDP skb-mod\n"
  510. " -N, --xdp-native=n Enfore XDP native mode\n"
  511. " -n, --interval=n Specify statistics update interval (default 1 sec).\n"
  512. "\n";
  513. fprintf(stderr, str, prog);
  514. exit(EXIT_FAILURE);
  515. }
  516. static void parse_command_line(int argc, char **argv)
  517. {
  518. int option_index, c;
  519. opterr = 0;
  520. for (;;) {
  521. c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
  522. &option_index);
  523. if (c == -1)
  524. break;
  525. switch (c) {
  526. case 'r':
  527. opt_bench = BENCH_RXDROP;
  528. break;
  529. case 't':
  530. opt_bench = BENCH_TXONLY;
  531. break;
  532. case 'l':
  533. opt_bench = BENCH_L2FWD;
  534. break;
  535. case 'i':
  536. opt_if = optarg;
  537. break;
  538. case 'q':
  539. opt_queue = atoi(optarg);
  540. break;
  541. case 's':
  542. opt_shared_packet_buffer = 1;
  543. break;
  544. case 'p':
  545. opt_poll = 1;
  546. break;
  547. case 'S':
  548. opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
  549. break;
  550. case 'N':
  551. opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
  552. break;
  553. case 'n':
  554. opt_interval = atoi(optarg);
  555. break;
  556. default:
  557. usage(basename(argv[0]));
  558. }
  559. }
  560. opt_ifindex = if_nametoindex(opt_if);
  561. if (!opt_ifindex) {
  562. fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
  563. opt_if);
  564. usage(basename(argv[0]));
  565. }
  566. }
  567. static void kick_tx(int fd)
  568. {
  569. int ret;
  570. ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
  571. if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
  572. return;
  573. lassert(0);
  574. }
  575. static inline void complete_tx_l2fwd(struct xdpsock *xsk)
  576. {
  577. u32 descs[BATCH_SIZE];
  578. unsigned int rcvd;
  579. size_t ndescs;
  580. if (!xsk->outstanding_tx)
  581. return;
  582. kick_tx(xsk->sfd);
  583. ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
  584. xsk->outstanding_tx;
  585. /* re-add completed Tx buffers */
  586. rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
  587. if (rcvd > 0) {
  588. umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
  589. xsk->outstanding_tx -= rcvd;
  590. xsk->tx_npkts += rcvd;
  591. }
  592. }
  593. static inline void complete_tx_only(struct xdpsock *xsk)
  594. {
  595. u32 descs[BATCH_SIZE];
  596. unsigned int rcvd;
  597. if (!xsk->outstanding_tx)
  598. return;
  599. kick_tx(xsk->sfd);
  600. rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
  601. if (rcvd > 0) {
  602. xsk->outstanding_tx -= rcvd;
  603. xsk->tx_npkts += rcvd;
  604. }
  605. }
  606. static void rx_drop(struct xdpsock *xsk)
  607. {
  608. struct xdp_desc descs[BATCH_SIZE];
  609. unsigned int rcvd, i;
  610. rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
  611. if (!rcvd)
  612. return;
  613. for (i = 0; i < rcvd; i++) {
  614. u32 idx = descs[i].idx;
  615. lassert(idx < NUM_FRAMES);
  616. #if DEBUG_HEXDUMP
  617. char *pkt;
  618. char buf[32];
  619. pkt = xq_get_data(xsk, idx, descs[i].offset);
  620. sprintf(buf, "idx=%d", idx);
  621. hex_dump(pkt, descs[i].len, buf);
  622. #endif
  623. }
  624. xsk->rx_npkts += rcvd;
  625. umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
  626. }
  627. static void rx_drop_all(void)
  628. {
  629. struct pollfd fds[MAX_SOCKS + 1];
  630. int i, ret, timeout, nfds = 1;
  631. memset(fds, 0, sizeof(fds));
  632. for (i = 0; i < num_socks; i++) {
  633. fds[i].fd = xsks[i]->sfd;
  634. fds[i].events = POLLIN;
  635. timeout = 1000; /* 1sn */
  636. }
  637. for (;;) {
  638. if (opt_poll) {
  639. ret = poll(fds, nfds, timeout);
  640. if (ret <= 0)
  641. continue;
  642. }
  643. for (i = 0; i < num_socks; i++)
  644. rx_drop(xsks[i]);
  645. }
  646. }
  647. static void tx_only(struct xdpsock *xsk)
  648. {
  649. int timeout, ret, nfds = 1;
  650. struct pollfd fds[nfds + 1];
  651. unsigned int idx = 0;
  652. memset(fds, 0, sizeof(fds));
  653. fds[0].fd = xsk->sfd;
  654. fds[0].events = POLLOUT;
  655. timeout = 1000; /* 1sn */
  656. for (;;) {
  657. if (opt_poll) {
  658. ret = poll(fds, nfds, timeout);
  659. if (ret <= 0)
  660. continue;
  661. if (fds[0].fd != xsk->sfd ||
  662. !(fds[0].revents & POLLOUT))
  663. continue;
  664. }
  665. if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
  666. lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);
  667. xsk->outstanding_tx += BATCH_SIZE;
  668. idx += BATCH_SIZE;
  669. idx %= NUM_FRAMES;
  670. }
  671. complete_tx_only(xsk);
  672. }
  673. }
  674. static void l2fwd(struct xdpsock *xsk)
  675. {
  676. for (;;) {
  677. struct xdp_desc descs[BATCH_SIZE];
  678. unsigned int rcvd, i;
  679. int ret;
  680. for (;;) {
  681. complete_tx_l2fwd(xsk);
  682. rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
  683. if (rcvd > 0)
  684. break;
  685. }
  686. for (i = 0; i < rcvd; i++) {
  687. char *pkt = xq_get_data(xsk, descs[i].idx,
  688. descs[i].offset);
  689. swap_mac_addresses(pkt);
  690. #if DEBUG_HEXDUMP
  691. char buf[32];
  692. u32 idx = descs[i].idx;
  693. sprintf(buf, "idx=%d", idx);
  694. hex_dump(pkt, descs[i].len, buf);
  695. #endif
  696. }
  697. xsk->rx_npkts += rcvd;
  698. ret = xq_enq(&xsk->tx, descs, rcvd);
  699. lassert(ret == 0);
  700. xsk->outstanding_tx += rcvd;
  701. }
  702. }
  703. int main(int argc, char **argv)
  704. {
  705. struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
  706. char xdp_filename[256];
  707. int i, ret, key = 0;
  708. pthread_t pt;
  709. parse_command_line(argc, argv);
  710. if (setrlimit(RLIMIT_MEMLOCK, &r)) {
  711. fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
  712. strerror(errno));
  713. exit(EXIT_FAILURE);
  714. }
  715. snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
  716. if (load_bpf_file(xdp_filename)) {
  717. fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf);
  718. exit(EXIT_FAILURE);
  719. }
  720. if (!prog_fd[0]) {
  721. fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n",
  722. strerror(errno));
  723. exit(EXIT_FAILURE);
  724. }
  725. if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) {
  726. fprintf(stderr, "ERROR: link set xdp fd failed\n");
  727. exit(EXIT_FAILURE);
  728. }
  729. ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0);
  730. if (ret) {
  731. fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
  732. exit(EXIT_FAILURE);
  733. }
  734. /* Create sockets... */
  735. xsks[num_socks++] = xsk_configure(NULL);
  736. #if RR_LB
  737. for (i = 0; i < MAX_SOCKS - 1; i++)
  738. xsks[num_socks++] = xsk_configure(xsks[0]->umem);
  739. #endif
  740. /* ...and insert them into the map. */
  741. for (i = 0; i < num_socks; i++) {
  742. key = i;
  743. ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0);
  744. if (ret) {
  745. fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
  746. exit(EXIT_FAILURE);
  747. }
  748. }
  749. signal(SIGINT, int_exit);
  750. signal(SIGTERM, int_exit);
  751. signal(SIGABRT, int_exit);
  752. setlocale(LC_ALL, "");
  753. ret = pthread_create(&pt, NULL, poller, NULL);
  754. lassert(ret == 0);
  755. prev_time = get_nsecs();
  756. if (opt_bench == BENCH_RXDROP)
  757. rx_drop_all();
  758. else if (opt_bench == BENCH_TXONLY)
  759. tx_only(xsks[0]);
  760. else
  761. l2fwd(xsks[0]);
  762. return 0;
  763. }