/* xdpsock_user.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2017 - 2018 Intel Corporation. */
  3. #include <assert.h>
  4. #include <errno.h>
  5. #include <getopt.h>
  6. #include <libgen.h>
  7. #include <linux/bpf.h>
  8. #include <linux/if_link.h>
  9. #include <linux/if_xdp.h>
  10. #include <linux/if_ether.h>
  11. #include <net/if.h>
  12. #include <signal.h>
  13. #include <stdbool.h>
  14. #include <stdio.h>
  15. #include <stdlib.h>
  16. #include <string.h>
  17. #include <net/ethernet.h>
  18. #include <sys/resource.h>
  19. #include <sys/socket.h>
  20. #include <sys/mman.h>
  21. #include <time.h>
  22. #include <unistd.h>
  23. #include <pthread.h>
  24. #include <locale.h>
  25. #include <sys/types.h>
  26. #include <poll.h>
  27. #include "bpf/libbpf.h"
  28. #include "bpf_util.h"
  29. #include <bpf/bpf.h>
  30. #include "xdpsock.h"
/* Fallback definitions for toolchains whose headers predate AF_XDP. */
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES 131072	/* number of frames in the umem */
#define FRAME_HEADROOM 0	/* no extra headroom per frame */
#define FRAME_SHIFT 11		/* log2(FRAME_SIZE): frame index <-> byte offset */
#define FRAME_SIZE 2048		/* bytes per umem frame */
#define NUM_DESCS 1024		/* RX/TX descriptor ring depth */
#define BATCH_SIZE 16		/* descriptors processed per loop iteration */

#define FQ_NUM_DESCS 1024	/* fill ring depth */
#define CQ_NUM_DESCS 1024	/* completion ring depth */

#define DEBUG_HEXDUMP 0		/* set to 1 to hex-dump every packet */

/* Kernel-style shorthands for the uapi fixed-width types. */
typedef __u64 u64;
typedef __u32 u32;
/* Timestamp (ns) of the previous stats dump; used to compute packet rates. */
static unsigned long prev_time;

/* Which benchmark this run executes (chosen on the command line). */
enum benchmark_type {
	BENCH_RXDROP = 0,	/* receive and discard */
	BENCH_TXONLY = 1,	/* transmit pre-built frames only */
	BENCH_L2FWD = 2,	/* MAC-swap and forward back out */
};

/* Command-line options; written once by parse_command_line(). */
static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;		/* XDP_FLAGS_* for bpf_set_link_xdp_fd() */
static const char *opt_if = "";		/* interface name (-i) */
static int opt_ifindex;			/* resolved from opt_if */
static int opt_queue;			/* queue id to bind to (-q) */
static int opt_poll;			/* use poll() instead of busy-spinning (-p) */
static int opt_shared_packet_buffer;	/* -s; NOTE(review): set but not read in this file */
static int opt_interval = 1;		/* statistics interval in seconds (-n) */
static u32 opt_xdp_bind_flags;		/* XDP_COPY / XDP_ZEROCOPY bind flags */
/*
 * Userspace view of one umem ring (fill or completion): a single-producer/
 * single-consumer ring shared with the kernel via mmap(); entries are
 * 64-bit umem addresses.
 */
struct xdp_umem_uqueue {
	u32 cached_prod;	/* local copy of *producer */
	u32 cached_cons;	/* local copy of *consumer (kept size ahead for free-count math) */
	u32 mask;		/* size - 1; ring sizes are powers of two */
	u32 size;
	u32 *producer;		/* shared producer index inside the mmap'ed region */
	u32 *consumer;		/* shared consumer index inside the mmap'ed region */
	u64 *ring;		/* entry array: umem byte addresses */
	void *map;		/* base of the mmap'ed ring region */
};
/* A registered packet-buffer area (umem) plus its fill and completion rings. */
struct xdp_umem {
	char *frames;			/* NUM_FRAMES * FRAME_SIZE byte buffer */
	struct xdp_umem_uqueue fq;	/* fill queue: addresses given to the kernel for RX */
	struct xdp_umem_uqueue cq;	/* completion queue: addresses returned after TX */
	int fd;				/* socket fd the umem is registered on */
};
/*
 * Userspace view of an RX or TX descriptor ring; same layout idea as
 * xdp_umem_uqueue, but entries are full xdp_desc records (addr + len).
 */
struct xdp_uqueue {
	u32 cached_prod;	/* local copy of *producer */
	u32 cached_cons;	/* local copy of *consumer (+size for free-count math) */
	u32 mask;		/* size - 1 */
	u32 size;
	u32 *producer;		/* shared producer index in the mmap'ed region */
	u32 *consumer;		/* shared consumer index in the mmap'ed region */
	struct xdp_desc *ring;	/* descriptor array */
	void *map;		/* base of the mmap'ed ring region */
};
/* One AF_XDP socket with its rings and statistics counters. */
struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;			/* AF_XDP socket file descriptor */
	struct xdp_umem *umem;		/* packet buffer, possibly shared between sockets */
	u32 outstanding_tx;		/* TX descriptors submitted but not yet completed */
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;	/* snapshot taken at the last stats dump */
	unsigned long prev_tx_npkts;
};

static int num_socks;			/* number of valid entries in xsks[] */
struct xdpsock *xsks[MAX_SOCKS];
  105. static unsigned long get_nsecs(void)
  106. {
  107. struct timespec ts;
  108. clock_gettime(CLOCK_MONOTONIC, &ts);
  109. return ts.tv_sec * 1000000000UL + ts.tv_nsec;
  110. }
/* Forward declaration: lassert() dumps the final statistics before aborting. */
static void dump_stats(void);

/*
 * Assert-like helper: on failure, print file/function/line plus errno,
 * dump final statistics, and exit the process.
 */
#define lassert(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "%s:%s:%i: Assertion failed: "	\
				#expr ": errno: %d/\"%s\"\n",		\
				__FILE__, __func__, __LINE__,		\
				errno, strerror(errno));		\
			dump_stats();					\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)

/* Compiler barrier: prevents the compiler reordering memory accesses across it. */
#define barrier() __asm__ __volatile__("": : :"memory")

/*
 * Userspace memory barriers for the shared rings: explicit DMB instructions
 * on arm64, a plain compiler barrier elsewhere (sufficient on x86).
 */
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif

/* Branch-prediction hints. */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/*
 * Template frame for the txonly benchmark: a 60-byte Ethernet/IPv4/UDP
 * packet (the string literal's trailing NUL is not transmitted).
 */
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
/*
 * Free entries on a umem ring from the producer's point of view.
 * Only re-reads the shared consumer index when the cached value says
 * fewer than @nb entries are free (keeps cache traffic low).
 */
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}
/*
 * Free entries on a descriptor ring from the producer's point of view;
 * same caching strategy as umem_nb_free().
 */
static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}
/*
 * Entries available to consume on a umem ring, capped at @nb.
 * Re-reads the shared producer index only when the cache looks empty.
 */
static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}
/*
 * Entries available to consume on a descriptor ring, capped at @ndescs;
 * same caching strategy as umem_nb_avail().
 */
static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}
/*
 * Post @nb buffer addresses (taken from RX descriptors @d) onto the fill
 * ring so the kernel can reuse them for reception. All-or-nothing:
 * returns -ENOSPC without writing anything if the batch does not fit.
 */
static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	/* Order the ring writes before publishing the new producer index. */
	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}
/*
 * Post @nb raw umem addresses from @d onto the fill ring. All-or-nothing:
 * returns -ENOSPC if the whole batch does not fit.
 */
static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	/* Order the ring writes before publishing the new producer index. */
	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}
/*
 * Harvest up to @nb completed TX buffer addresses from the completion
 * ring into @d. Returns the number actually read (may be 0).
 */
static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	/* Read the producer index before the ring entries it published. */
	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		/* Order the ring reads before releasing the slots to the kernel. */
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}
  218. static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
  219. {
  220. return &xsk->umem->frames[addr];
  221. }
/*
 * Enqueue @ndescs descriptors onto an RX/TX ring. All-or-nothing:
 * returns -ENOSPC if the ring cannot take the whole batch.
 */
static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	/* Order the descriptor writes before publishing the producer index. */
	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}
/*
 * Enqueue @ndescs TX descriptors pointing at consecutive umem frames
 * starting at frame index @id, each sized to the template packet.
 */
static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		/* frame index -> umem byte offset */
		r[idx].addr = (id + i) << FRAME_SHIFT;
		r[idx].len = sizeof(pkt_data) - 1;
	}

	/* Order the descriptor writes before publishing the producer index. */
	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}
/*
 * Dequeue up to @ndescs descriptors into @descs and release the consumed
 * slots back to the kernel. Returns the number read (possibly 0).
 */
static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	/* Read the producer index before the descriptors it published. */
	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		/* Order the descriptor reads before releasing the slots. */
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}
  274. static void swap_mac_addresses(void *data)
  275. {
  276. struct ether_header *eth = (struct ether_header *)data;
  277. struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
  278. struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
  279. struct ether_addr tmp;
  280. tmp = *src_addr;
  281. *src_addr = *dst_addr;
  282. *dst_addr = tmp;
  283. }
  284. static void hex_dump(void *pkt, size_t length, u64 addr)
  285. {
  286. const unsigned char *address = (unsigned char *)pkt;
  287. const unsigned char *line = address;
  288. size_t line_size = 32;
  289. unsigned char c;
  290. char buf[32];
  291. int i = 0;
  292. if (!DEBUG_HEXDUMP)
  293. return;
  294. sprintf(buf, "addr=%llu", addr);
  295. printf("length = %zu\n", length);
  296. printf("%s | ", buf);
  297. while (length-- > 0) {
  298. printf("%02X ", *address++);
  299. if (!(++i % line_size) || (length == 0 && i % line_size)) {
  300. if (length == 0) {
  301. while (i++ % line_size)
  302. printf("__ ");
  303. }
  304. printf(" | "); /* right close */
  305. while (line < address) {
  306. c = *line++;
  307. printf("%c", (c < 33 || c == 255) ? 0x2E : c);
  308. }
  309. printf("\n");
  310. if (length > 0)
  311. printf("%s | ", buf);
  312. }
  313. }
  314. printf("\n");
  315. }
  316. static size_t gen_eth_frame(char *frame)
  317. {
  318. memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
  319. return sizeof(pkt_data) - 1;
  320. }
/*
 * Allocate a NUM_FRAMES * FRAME_SIZE packet buffer, register it as the
 * umem of socket @sfd and mmap the fill and completion rings. Exits via
 * lassert() on any failure. For txonly runs, every frame is pre-filled
 * with the template packet. The returned umem is owned by the caller.
 */
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	/* Register the buffer area and size the two umem rings. */
	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	/* Ask the kernel where producer/consumer/desc live inside the mmap. */
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Fill ring */
	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	/* Start with the whole free count cached (see umem_nb_free()). */
	umem->fq.cached_cons = FQ_NUM_DESCS;

	/* Completion ring */
	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		/* Pre-build one template packet in every frame. */
		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}
/*
 * Create and bind one AF_XDP socket. If @umem is NULL, a new umem is
 * configured and its fill ring pre-populated with NUM_DESCS frame
 * addresses; otherwise @umem is shared via XDP_SHARED_UMEM. Maps the
 * RX/TX rings and binds the socket to opt_ifindex/opt_queue.
 * Exits via lassert() on failure.
 */
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	/* Size the RX and TX descriptor rings. */
	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	/* Ask the kernel where producer/consumer/desc live inside the mmap. */
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		/* Hand the first NUM_DESCS frame addresses to the kernel. */
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	/* Start with the whole free count cached (see xq_nb_free()). */
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;

	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = opt_xdp_bind_flags;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}
  449. static void print_benchmark(bool running)
  450. {
  451. const char *bench_str = "INVALID";
  452. if (opt_bench == BENCH_RXDROP)
  453. bench_str = "rxdrop";
  454. else if (opt_bench == BENCH_TXONLY)
  455. bench_str = "txonly";
  456. else if (opt_bench == BENCH_L2FWD)
  457. bench_str = "l2fwd";
  458. printf("%s:%d %s ", opt_if, opt_queue, bench_str);
  459. if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
  460. printf("xdp-skb ");
  461. else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
  462. printf("xdp-drv ");
  463. else
  464. printf(" ");
  465. if (opt_poll)
  466. printf("poll() ");
  467. if (running) {
  468. printf("running...");
  469. fflush(stdout);
  470. }
  471. }
  472. static void dump_stats(void)
  473. {
  474. unsigned long now = get_nsecs();
  475. long dt = now - prev_time;
  476. int i;
  477. prev_time = now;
  478. for (i = 0; i < num_socks && xsks[i]; i++) {
  479. char *fmt = "%-15s %'-11.0f %'-11lu\n";
  480. double rx_pps, tx_pps;
  481. rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
  482. 1000000000. / dt;
  483. tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
  484. 1000000000. / dt;
  485. printf("\n sock%d@", i);
  486. print_benchmark(false);
  487. printf("\n");
  488. printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
  489. dt / 1000000000.);
  490. printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
  491. printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);
  492. xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
  493. xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
  494. }
  495. }
  496. static void *poller(void *arg)
  497. {
  498. (void)arg;
  499. for (;;) {
  500. sleep(opt_interval);
  501. dump_stats();
  502. }
  503. return NULL;
  504. }
/*
 * SIGINT/SIGTERM/SIGABRT handler: print the final statistics, detach the
 * XDP program from the interface and exit.
 * NOTE(review): printf()/exit() are not async-signal-safe; tolerable for
 * a sample that is terminating anyway, but worth confirming.
 */
static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}
/* getopt_long() option table; short names mirror the flags listed in usage(). */
static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{0, 0, 0, 0}	/* terminator */
};
  527. static void usage(const char *prog)
  528. {
  529. const char *str =
  530. " Usage: %s [OPTIONS]\n"
  531. " Options:\n"
  532. " -r, --rxdrop Discard all incoming packets (default)\n"
  533. " -t, --txonly Only send packets\n"
  534. " -l, --l2fwd MAC swap L2 forwarding\n"
  535. " -i, --interface=n Run on interface n\n"
  536. " -q, --queue=n Use queue n (default 0)\n"
  537. " -p, --poll Use poll syscall\n"
  538. " -s, --shared-buffer Use shared packet buffer\n"
  539. " -S, --xdp-skb=n Use XDP skb-mod\n"
  540. " -N, --xdp-native=n Enfore XDP native mode\n"
  541. " -n, --interval=n Specify statistics update interval (default 1 sec).\n"
  542. " -z, --zero-copy Force zero-copy mode.\n"
  543. " -c, --copy Force copy mode.\n"
  544. "\n";
  545. fprintf(stderr, str, prog);
  546. exit(EXIT_FAILURE);
  547. }
/*
 * Parse argv into the opt_* globals and resolve the interface name to an
 * index. Prints usage and exits on an unknown option or a missing/unknown
 * interface.
 */
static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;	/* suppress getopt's own messages; usage() reports instead */

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:cz", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			/* NOTE(review): atoi() silently yields 0 on bad input */
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			/* skb mode implies copy mode on bind */
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}
  606. static void kick_tx(int fd)
  607. {
  608. int ret;
  609. ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
  610. if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
  611. return;
  612. lassert(0);
  613. }
/*
 * For l2fwd: kick the kernel to transmit, harvest up to BATCH_SIZE
 * completed TX buffers, and recycle their addresses onto the fill ring so
 * they become available for reception again.
 */
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}
/*
 * For txonly: kick the kernel and retire up to BATCH_SIZE completed TX
 * buffers. The harvested addresses are discarded — tx_only() re-addresses
 * frames by index, so nothing needs to go back on the fill ring.
 */
static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}
  645. static void rx_drop(struct xdpsock *xsk)
  646. {
  647. struct xdp_desc descs[BATCH_SIZE];
  648. unsigned int rcvd, i;
  649. rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
  650. if (!rcvd)
  651. return;
  652. for (i = 0; i < rcvd; i++) {
  653. char *pkt = xq_get_data(xsk, descs[i].addr);
  654. hex_dump(pkt, descs[i].len, descs[i].addr);
  655. }
  656. xsk->rx_npkts += rcvd;
  657. umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
  658. }
  659. static void rx_drop_all(void)
  660. {
  661. struct pollfd fds[MAX_SOCKS + 1];
  662. int i, ret, timeout, nfds = 1;
  663. memset(fds, 0, sizeof(fds));
  664. for (i = 0; i < num_socks; i++) {
  665. fds[i].fd = xsks[i]->sfd;
  666. fds[i].events = POLLIN;
  667. timeout = 1000; /* 1sn */
  668. }
  669. for (;;) {
  670. if (opt_poll) {
  671. ret = poll(fds, nfds, timeout);
  672. if (ret <= 0)
  673. continue;
  674. }
  675. for (i = 0; i < num_socks; i++)
  676. rx_drop(xsks[i]);
  677. }
  678. }
/*
 * txonly main loop: whenever BATCH_SIZE TX slots are free, enqueue a batch
 * of template frames (cycling through all umem frames by index) and reap
 * completions. With -p, wait for POLLOUT before trying to enqueue.
 */
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;	/* next frame index to transmit from */

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1sn */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		/* Only submit full batches; partial batches are skipped. */
		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;	/* wrap back to the first frame */
		}

		complete_tx_only(xsk);
	}
}
/*
 * l2fwd main loop: busy-wait for a batch of received frames, swap the MAC
 * addresses in place, and enqueue the very same buffers on the TX ring.
 * NOTE(review): the lassert() on xq_enq() relies on TX ring space always
 * sufficing for a full RX batch once completions have been reaped — verify
 * for other ring-size configurations.
 */
static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		/* Spin, reaping TX completions, until frames arrive. */
		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}
  729. int main(int argc, char **argv)
  730. {
  731. struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
  732. struct bpf_prog_load_attr prog_load_attr = {
  733. .prog_type = BPF_PROG_TYPE_XDP,
  734. };
  735. int prog_fd, qidconf_map, xsks_map;
  736. struct bpf_object *obj;
  737. char xdp_filename[256];
  738. struct bpf_map *map;
  739. int i, ret, key = 0;
  740. pthread_t pt;
  741. parse_command_line(argc, argv);
  742. if (setrlimit(RLIMIT_MEMLOCK, &r)) {
  743. fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
  744. strerror(errno));
  745. exit(EXIT_FAILURE);
  746. }
  747. snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
  748. prog_load_attr.file = xdp_filename;
  749. if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
  750. exit(EXIT_FAILURE);
  751. if (prog_fd < 0) {
  752. fprintf(stderr, "ERROR: no program found: %s\n",
  753. strerror(prog_fd));
  754. exit(EXIT_FAILURE);
  755. }
  756. map = bpf_object__find_map_by_name(obj, "qidconf_map");
  757. qidconf_map = bpf_map__fd(map);
  758. if (qidconf_map < 0) {
  759. fprintf(stderr, "ERROR: no qidconf map found: %s\n",
  760. strerror(qidconf_map));
  761. exit(EXIT_FAILURE);
  762. }
  763. map = bpf_object__find_map_by_name(obj, "xsks_map");
  764. xsks_map = bpf_map__fd(map);
  765. if (xsks_map < 0) {
  766. fprintf(stderr, "ERROR: no xsks map found: %s\n",
  767. strerror(xsks_map));
  768. exit(EXIT_FAILURE);
  769. }
  770. if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
  771. fprintf(stderr, "ERROR: link set xdp fd failed\n");
  772. exit(EXIT_FAILURE);
  773. }
  774. ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
  775. if (ret) {
  776. fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
  777. exit(EXIT_FAILURE);
  778. }
  779. /* Create sockets... */
  780. xsks[num_socks++] = xsk_configure(NULL);
  781. #if RR_LB
  782. for (i = 0; i < MAX_SOCKS - 1; i++)
  783. xsks[num_socks++] = xsk_configure(xsks[0]->umem);
  784. #endif
  785. /* ...and insert them into the map. */
  786. for (i = 0; i < num_socks; i++) {
  787. key = i;
  788. ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
  789. if (ret) {
  790. fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
  791. exit(EXIT_FAILURE);
  792. }
  793. }
  794. signal(SIGINT, int_exit);
  795. signal(SIGTERM, int_exit);
  796. signal(SIGABRT, int_exit);
  797. setlocale(LC_ALL, "");
  798. ret = pthread_create(&pt, NULL, poller, NULL);
  799. lassert(ret == 0);
  800. prev_time = get_nsecs();
  801. if (opt_bench == BENCH_RXDROP)
  802. rx_drop_all();
  803. else if (opt_bench == BENCH_TXONLY)
  804. tx_only(xsks[0]);
  805. else
  806. l2fwd(xsks[0]);
  807. return 0;
  808. }