/* test_lpm_map.c */
  1. /*
  2. * Randomized tests for eBPF longest-prefix-match maps
  3. *
  4. * This program runs randomized tests against the lpm-bpf-map. It implements a
  5. * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
  6. * lists. The implementation should be pretty straightforward.
  7. *
  8. * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
  9. * the trie-based bpf-map implementation behaves the same way as tlpm.
  10. */
  11. #include <assert.h>
  12. #include <errno.h>
  13. #include <inttypes.h>
  14. #include <linux/bpf.h>
  15. #include <stdio.h>
  16. #include <stdlib.h>
  17. #include <string.h>
  18. #include <time.h>
  19. #include <unistd.h>
  20. #include <arpa/inet.h>
  21. #include <sys/time.h>
  22. #include <sys/resource.h>
  23. #include <bpf/bpf.h>
  24. #include "bpf_util.h"
  25. struct tlpm_node {
  26. struct tlpm_node *next;
  27. size_t n_bits;
  28. uint8_t key[];
  29. };
  30. static struct tlpm_node *tlpm_match(struct tlpm_node *list,
  31. const uint8_t *key,
  32. size_t n_bits);
  33. static struct tlpm_node *tlpm_add(struct tlpm_node *list,
  34. const uint8_t *key,
  35. size_t n_bits)
  36. {
  37. struct tlpm_node *node;
  38. size_t n;
  39. n = (n_bits + 7) / 8;
  40. /* 'overwrite' an equivalent entry if one already exists */
  41. node = tlpm_match(list, key, n_bits);
  42. if (node && node->n_bits == n_bits) {
  43. memcpy(node->key, key, n);
  44. return list;
  45. }
  46. /* add new entry with @key/@n_bits to @list and return new head */
  47. node = malloc(sizeof(*node) + n);
  48. assert(node);
  49. node->next = list;
  50. node->n_bits = n_bits;
  51. memcpy(node->key, key, n);
  52. return node;
  53. }
  54. static void tlpm_clear(struct tlpm_node *list)
  55. {
  56. struct tlpm_node *node;
  57. /* free all entries in @list */
  58. while ((node = list)) {
  59. list = list->next;
  60. free(node);
  61. }
  62. }
  63. static struct tlpm_node *tlpm_match(struct tlpm_node *list,
  64. const uint8_t *key,
  65. size_t n_bits)
  66. {
  67. struct tlpm_node *best = NULL;
  68. size_t i;
  69. /* Perform longest prefix-match on @key/@n_bits. That is, iterate all
  70. * entries and match each prefix against @key. Remember the "best"
  71. * entry we find (i.e., the longest prefix that matches) and return it
  72. * to the caller when done.
  73. */
  74. for ( ; list; list = list->next) {
  75. for (i = 0; i < n_bits && i < list->n_bits; ++i) {
  76. if ((key[i / 8] & (1 << (7 - i % 8))) !=
  77. (list->key[i / 8] & (1 << (7 - i % 8))))
  78. break;
  79. }
  80. if (i >= list->n_bits) {
  81. if (!best || i > best->n_bits)
  82. best = list;
  83. }
  84. }
  85. return best;
  86. }
  87. static void test_lpm_basic(void)
  88. {
  89. struct tlpm_node *list = NULL, *t1, *t2;
  90. /* very basic, static tests to verify tlpm works as expected */
  91. assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));
  92. t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
  93. assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
  94. assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
  95. assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
  96. assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
  97. assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
  98. assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));
  99. t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
  100. assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
  101. assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
  102. assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
  103. assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));
  104. tlpm_clear(list);
  105. }
  106. static void test_lpm_order(void)
  107. {
  108. struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
  109. size_t i, j;
  110. /* Verify the tlpm implementation works correctly regardless of the
  111. * order of entries. Insert a random set of entries into @l1, and copy
  112. * the same data in reverse order into @l2. Then verify a lookup of
  113. * random keys will yield the same result in both sets.
  114. */
  115. for (i = 0; i < (1 << 12); ++i)
  116. l1 = tlpm_add(l1, (uint8_t[]){
  117. rand() % 0xff,
  118. rand() % 0xff,
  119. }, rand() % 16 + 1);
  120. for (t1 = l1; t1; t1 = t1->next)
  121. l2 = tlpm_add(l2, t1->key, t1->n_bits);
  122. for (i = 0; i < (1 << 8); ++i) {
  123. uint8_t key[] = { rand() % 0xff, rand() % 0xff };
  124. t1 = tlpm_match(l1, key, 16);
  125. t2 = tlpm_match(l2, key, 16);
  126. assert(!t1 == !t2);
  127. if (t1) {
  128. assert(t1->n_bits == t2->n_bits);
  129. for (j = 0; j < t1->n_bits; ++j)
  130. assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
  131. (t2->key[j / 8] & (1 << (7 - j % 8))));
  132. }
  133. }
  134. tlpm_clear(l1);
  135. tlpm_clear(l2);
  136. }
/*
 * Differential test of tlpm vs. the kernel's trie-based BPF LPM map for a
 * given key size in bytes (prefix lengths of 0 .. 8*keysize bits).
 */
static void test_lpm_map(int keysize)
{
	size_t i, j, n_matches, n_nodes, n_lookups;
	struct tlpm_node *t, *list = NULL;
	struct bpf_lpm_trie_key *key;
	uint8_t *data, *value;
	int r, map;

	/* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
	 * prefixes and insert it into both tlpm and bpf-lpm. Then run some
	 * randomized lookups and verify both maps return the same result.
	 */

	n_matches = 0;
	n_nodes = 1 << 8;	/* prefixes inserted into both maps */
	n_lookups = 1 << 16;	/* random lookups to cross-check */

	/* lookup key data: keysize random bytes */
	data = alloca(keysize);
	memset(data, 0, keysize);

	/* map value layout: keysize key bytes + prefix length in value[keysize] */
	value = alloca(keysize + 1);
	memset(value, 0, keysize + 1);

	/* BPF key: struct header (prefixlen) followed by keysize data bytes */
	key = alloca(sizeof(*key) + keysize);
	memset(key, 0, sizeof(*key) + keysize);

	map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
			     sizeof(*key) + keysize,
			     keysize + 1,
			     4096,
			     BPF_F_NO_PREALLOC);
	assert(map >= 0);

	/* populate tlpm and the BPF map with the same random prefixes */
	for (i = 0; i < n_nodes; ++i) {
		for (j = 0; j < keysize; ++j)
			value[j] = rand() & 0xff;
		value[keysize] = rand() % (8 * keysize + 1);

		list = tlpm_add(list, value, value[keysize]);

		key->prefixlen = value[keysize];
		memcpy(key->data, value, keysize);
		r = bpf_map_update_elem(map, key, value, 0);
		assert(!r);
	}

	/* full-length random lookups: both maps must agree on every result */
	for (i = 0; i < n_lookups; ++i) {
		for (j = 0; j < keysize; ++j)
			data[j] = rand() & 0xff;

		t = tlpm_match(list, data, 8 * keysize);

		key->prefixlen = 8 * keysize;
		memcpy(key->data, data, keysize);
		r = bpf_map_lookup_elem(map, key, value);
		/* the only acceptable failure is a clean miss */
		assert(!r || errno == ENOENT);
		/* hit/miss must agree between tlpm and the BPF map */
		assert(!t == !!r);

		if (t) {
			++n_matches;

			/* stored value carries the prefix length in its last byte */
			assert(t->n_bits == value[keysize]);
			/* matched prefixes must be bit-identical */
			for (j = 0; j < t->n_bits; ++j)
				assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
				       (value[j / 8] & (1 << (7 - j % 8))));
		}
	}

	close(map);
	tlpm_clear(list);

	/* With 255 random nodes in the map, we are pretty likely to match
	 * something on every lookup. For statistics, use this:
	 *
	 *	printf("  nodes: %zu\n"
	 *	       "lookups: %zu\n"
	 *	       "matches: %zu\n", n_nodes, n_lookups, n_matches);
	 */
}
/* Test the implementation with some 'real world' examples: overlapping
 * IPv4 ranges and one IPv6 /64, checking longest-prefix wins and that
 * uncovered addresses fail with ENOENT.
 */
static void test_lpm_ipaddr(void)
{
	struct bpf_lpm_trie_key *key_ipv4;
	struct bpf_lpm_trie_key *key_ipv6;
	size_t key_size_ipv4;
	size_t key_size_ipv6;
	int map_fd_ipv4;
	int map_fd_ipv6;
	__u64 value;

	/* key = prefixlen header + 4 (IPv4) or 16 (IPv6) bytes of address */
	key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
	key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
	key_ipv4 = alloca(key_size_ipv4);
	key_ipv6 = alloca(key_size_ipv6);

	map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
				     key_size_ipv4, sizeof(value),
				     100, BPF_F_NO_PREALLOC);
	assert(map_fd_ipv4 >= 0);

	map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
				     key_size_ipv6, sizeof(value),
				     100, BPF_F_NO_PREALLOC);
	assert(map_fd_ipv6 >= 0);

	/* Fill data some IPv4 and IPv6 address ranges */
	value = 1;
	key_ipv4->prefixlen = 16;
	inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 2;
	key_ipv4->prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 3;
	key_ipv4->prefixlen = 24;
	inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 5;
	key_ipv4->prefixlen = 24;
	inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 4;
	key_ipv4->prefixlen = 23;
	inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 0xdeadbeef;
	key_ipv6->prefixlen = 64;
	inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
	assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0);

	/* Set prefixlen to maximum for lookups */
	key_ipv4->prefixlen = 32;
	key_ipv6->prefixlen = 128;

	/* Test some lookups that should come back with a value */
	inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
	assert(value == 3);	/* longest match is 192.168.128.0/24 */

	inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
	assert(value == 2);	/* the /24 beats both the /23 and the /16 */

	inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
	assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
	assert(value == 0xdeadbeef);

	inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
	assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
	assert(value == 0xdeadbeef);

	/* Test some lookups that should not match any entry */
	inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
	       errno == ENOENT);

	inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
	       errno == ENOENT);

	inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
	assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
	       errno == ENOENT);

	close(map_fd_ipv4);
	close(map_fd_ipv6);
}
  276. int main(void)
  277. {
  278. struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
  279. int i, ret;
  280. /* we want predictable, pseudo random tests */
  281. srand(0xf00ba1);
  282. /* allow unlimited locked memory */
  283. ret = setrlimit(RLIMIT_MEMLOCK, &limit);
  284. if (ret < 0)
  285. perror("Unable to lift memlock rlimit");
  286. test_lpm_basic();
  287. test_lpm_order();
  288. /* Test with 8, 16, 24, 32, ... 128 bit prefix length */
  289. for (i = 1; i <= 16; ++i)
  290. test_lpm_map(i);
  291. test_lpm_ipaddr();
  292. printf("test_lpm: OK\n");
  293. return 0;
  294. }