xdpsock_user.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf_load.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

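/* UMEM geometry: NUM_FRAMES chunks of FRAME_SIZE (1 << FRAME_SHIFT) bytes
 * each, so a frame index converts to a umem byte offset via
 * "idx << FRAME_SHIFT". NUM_DESCS sizes the Rx/Tx rings, FQ/CQ_NUM_DESCS
 * the fill/completion rings, and BATCH_SIZE bounds how many descriptors
 * are moved per ring operation.
 */
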
#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;

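/* Single-producer/single-consumer rings shared with the kernel. The
 * producer/consumer pointers live in the mmapped ring region;
 * cached_prod/cached_cons are local copies that let user space batch
 * updates instead of touching the shared cache lines per descriptor.
 * Fill and completion rings carry plain u64 umem addresses; the Rx and
 * Tx rings carry struct xdp_desc entries.
 */
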
struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};

#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr) \
	do { \
		if (!(expr)) { \
			fprintf(stderr, "%s:%s:%i: Assertion failed: " \
				#expr ": errno: %d/\"%s\"\n", \
				__FILE__, __func__, __LINE__, \
				errno, strerror(errno)); \
			dump_stats(); \
			exit(EXIT_FAILURE); \
		} \
	} while (0)

#define barrier() __asm__ __volatile__("": : :"memory")
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

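/* Canned 60-byte frame (Ethernet/IPv4/UDP plus payload) transmitted
 * verbatim by the txonly benchmark.
 */
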
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

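/* Ring accounting helpers: compute free/available entries against the
 * cached pointers first, and only re-read the shared producer/consumer
 * pointer when the cached view is exhausted.
 */
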
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;
	return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}

static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}

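/* Rx/Tx descriptor ring helpers. Descriptor addresses are byte offsets
 * into the umem; xq_enq_tx_only() derives them from a running frame
 * index via FRAME_SHIFT.
 */
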
static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = (id + i) << FRAME_SHIFT;
		r[idx].len = sizeof(pkt_data) - 1;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

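/* Register a page-aligned packet buffer with the kernel (XDP_UMEM_REG),
 * size the fill and completion rings, then mmap both rings and wire up
 * the producer/consumer pointers using XDP_MMAP_OFFSETS.
 */
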
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	umem->fq.cached_cons = FQ_NUM_DESCS;

	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}

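/* Create an AF_XDP socket: configure (or share) a umem, size and mmap the
 * Rx and Tx rings, pre-populate the fill ring for the first socket, and
 * bind to the chosen interface/queue. Subsequent sockets attach to the
 * first socket's umem via XDP_SHARED_UMEM.
 */
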
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;

	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = opt_xdp_bind_flags;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf(" ");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

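/* Per-socket statistics: convert the per-interval packet deltas into
 * packets per second using the elapsed monotonic time in nanoseconds.
 */
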
static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
		"  -S, --xdp-skb	Use XDP skb mode\n"
		"  -N, --xdp-native	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}

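/* Example invocation (hypothetical interface name, assuming the sample
 * builds as ./xdpsock):
 *
 *   ./xdpsock -i eth0 -q 0 --txonly --poll
 */
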
static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}

static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
		return;
	lassert(0);
}

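/* Tx completion: a zero-length sendto() kicks the kernel to transmit,
 * then completed umem addresses are harvested from the completion ring.
 * In l2fwd they are recycled straight into the fill ring so the frames
 * can be received again; in txonly they are simply counted.
 */
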
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		char *pkt = xq_get_data(xsk, descs[i].addr);

		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int timeout = 1000;	/* 1 second */
	int i, ret;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			/* Poll every configured socket, not just the first */
			ret = poll(fds, num_socks, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}

static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000;	/* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}

static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}

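/* Setup sequence: raise RLIMIT_MEMLOCK so the umem can be locked, load
 * and attach the XDP program ("<prog>_kern.o"), tell it which queue to
 * redirect from via the qidconf map (map_fd[0]), create the AF_XDP
 * socket(s), publish their fds in the sockets map (map_fd[1]), then run
 * the selected benchmark loop.
 */
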
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char xdp_filename[256];
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);

	if (load_bpf_file(xdp_filename)) {
		fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf);
		exit(EXIT_FAILURE);
	}

	if (!prog_fd[0]) {
		fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}