flow_table.c

/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS         1024
#define REHASH_INTERVAL         (10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}
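
/* Apply 'mask' to 'src' and store the result in 'dst'.  Only the bytes
 * inside mask->range are written, copied a long at a time; this is safe
 * because sw_flow_key is long-aligned and long-sized (enforced by the
 * BUILD_BUG_ONs in ovs_flow_init()).
 */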
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
{
        const long *m = (const long *)((const u8 *)&mask->key +
                                mask->range.start);
        const long *s = (const long *)((const u8 *)src +
                                mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;

        /* The memory outside of 'mask->range' is not set since
         * further operations on 'dst' only use contents within
         * 'mask->range'.
         */
        for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
                *d++ = *s++ & *m++;
}
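
/* Allocate a flow along with its per-NUMA-node statistics pointers.
 * Only the node 0 stats block is preallocated here; the remaining
 * pointers start out NULL, the intent presumably being that the stats
 * update path allocates a block on demand for whichever node first
 * writes to the flow.
 */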
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct flow_stats *stats;
        int node;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;
        flow->stats_last_writer = NUMA_NO_NODE;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO, 0);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        for_each_node(node)
                if (node != 0)
                        RCU_INIT_POINTER(flow->stats[node], NULL);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
        return table->count;
}
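
/* Allocate 'n_buckets' hash chain heads.  A flex_array is used so the
 * bucket array can grow large without requiring a single physically
 * contiguous allocation.  Every bucket starts as an empty hlist.
 */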
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}
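
/* Free a flow and everything it owns: its action list and any per-node
 * stats blocks that were allocated.  Callers must ensure no RCU readers
 * can still reach the flow; ovs_flow_free() below handles the deferred
 * (RCU) case.
 */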
static void flow_free(struct sw_flow *flow)
{
        int node;

        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
                                        (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);
        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ti)
                return -ENOMEM;

        rcu_assign_pointer(table->ti, ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}
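
/* Tear down a table instance.  'keep_flows' is set during a rehash,
 * when the flows have already been linked into a replacement instance
 * and must survive; otherwise every flow in every bucket is unlinked
 * and freed along with the instance.
 */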
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
        int i;

        if (!ti)
                return;

        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred)
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        else
                __table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        table_instance_destroy(ti, deferred);
}
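
/* Iterator for dumping flows to userspace.  '*bucket' and '*last' form
 * a resumable cursor: the current bucket index and the count of entries
 * already returned from that bucket, so a dump can be continued across
 * multiple calls.
 */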
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
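
/* Map a flow hash to its bucket.  The hash is folded through
 * jhash_1word() with a per-instance random seed, and n_buckets is
 * always a power of two, so masking with (n_buckets - 1) yields the
 * bucket index.
 */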
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                                (hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}
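
/* Link every flow from 'old' into 'new'.  Each flow carries two hash
 * nodes; flipping node_ver lets a flow sit in both instances at once,
 * so concurrent RCU readers keep a consistent view throughout the
 * rehash.  Setting keep_flows stops the old instance from freeing the
 * flows when it is later destroyed.
 */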
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);
                hlist_for_each_entry(flow, head, hash_node[old_ver])
                        table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti);

        return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti;
        struct table_instance *new_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;

        rcu_assign_pointer(flow_table->ti, new_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;

        table_instance_destroy(old_ti, true);
        return 0;
}
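
/* Hash only the bytes of 'key' in [key_start, key_end), i.e. the range
 * a mask declares relevant, treated as an array of u32 words.
 */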
static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
{
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return arch_fast_hash2(hash_key, hash_u32s, 0);
}
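
/* For tunneled packets the tunnel key at offset 0 is significant, so
 * comparison must cover the whole key; otherwise it can skip the
 * tunnel fields and start at the 'phy' member, rounded down to long
 * alignment to keep the long-sized compare loops valid.
 */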
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                          sizeof(long));
}
static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                int key_start, int key_end)
{
        return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}
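
/* Look up 'unmasked' under one specific mask: apply the mask, hash the
 * masked range, then scan the matching bucket for a flow with the same
 * mask, the same hash, and identical masked key bytes.
 */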
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int key_start = mask->range.start;
        int key_end = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, mask);
        hash = flow_hash(&masked_key, key_start, key_end);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
                if (flow->mask == mask && flow->hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key,
                                        key_start, key_end))
                        return flow;
        }
        return NULL;
}
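
/* Packet-path lookup: try each registered mask in list order until one
 * yields a match.  '*n_mask_hit' reports how many masks were tried,
 * which callers can use as a measure of megaflow lookup cost.
 */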
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          struct sw_flow_match *match)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
                if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
                        return flow;
        }
        return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
        return table_instance_rehash(ti, ti->n_buckets * 2);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        kfree_rcu(mask, rcu);
                }
        }
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[ti->node_ver]);
        table->count--;

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;

                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}
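
/* Insert policy: after linking the flow, the table instance is replaced
 * when it becomes too full (count > n_buckets, i.e. the average chain
 * length exceeds one, in which case the bucket count doubles), or when
 * REHASH_INTERVAL has elapsed, in which case rehashing at the same size
 * re-seeds the hash, presumably to bound worst-case chain length under
 * unfavorable traffic patterns.
 */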
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        struct sw_flow_mask *mask)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;

        flow->hash = flow_hash(&flow->key, flow->mask->range.start,
                        flow->mask->range.end);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                table_instance_destroy(ti, true);
                table->last_rehash = jiffies;
        }
        return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (num_possible_nodes()
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}