/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
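
/* Allocate a new flow along with its default statistics block.  Statistics
 * are kept per NUMA node: only node 0 gets a block up front here; the other
 * nodes' pointers start out NULL and are filled in on demand by the stats
 * update path (in flow.c).
 */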
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}
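
/* The bucket array can grow well past a page once the table expands, so it
 * is backed by a flex_array, which is built from page-sized pieces and
 * avoids requiring one large contiguous allocation.
 */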
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
				flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
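
/* Tear down a pair of table instances.  Each flow is linked into the
 * key-indexed instance and, if it has a UFID, into the UFID-indexed one,
 * so both instances must be destroyed together.  keep_flows is set after a
 * rehash, when the flows have already been relinked into a newer instance
 * and must not be freed here.
 */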
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}
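
/* Walk the table one flow per call.  '*bucket' and '*last' form a cursor
 * owned by the caller: *bucket is the current hash bucket and *last counts
 * how many entries of that bucket have already been returned, so a dump
 * can be resumed across multiple calls.
 */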
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
			      (hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
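
/* Each sw_flow carries two hlist nodes per index, selected by node_ver.
 * Flipping node_ver in the new instance lets every flow be linked into the
 * new buckets while the old instance's links stay intact for concurrent
 * RCU readers.
 */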
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;
	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}
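
/* Only the bytes inside 'range' are meaningful in a masked key (the rest
 * may be uninitialized), so the hash covers exactly that span, consumed in
 * whole-u32 chunks.
 */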
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
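
/* A flow received over a tunnel must be compared from offset 0 so that the
 * tunnel metadata participates in the match; otherwise comparison can start
 * at the physical-input fields, skipping the unused tunnel key.
 */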
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.u.ipv4.dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
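
/* Look up 'unmasked' under a single mask: apply the mask, hash the masked
 * key over the mask's range, then compare candidates in the bucket by mask
 * identity, hash value, and masked key.
 */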
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}
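
/* Packet lookup walks the mask list linearly, performing one masked hash
 * lookup per mask until a flow matches.  '*n_mask_hit' returns how many
 * masks were tried, which the caller can use as a lookup-efficiency
 * statistic.
 */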
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
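
/* A minimal usage sketch (hypothetical caller; the real fast path lives in
 * datapath.c).  Lookups rely on RCU, so they must run inside a read-side
 * critical section, and the returned flow is only valid within it:
 *
 *	rcu_read_lock();
 *	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
 *	if (flow)
 *		... use 'flow' before rcu_read_unlock() ...
 *	rcu_read_unlock();
 */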
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
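
/* The key table doubles in size once it holds more flows than buckets.
 * Otherwise it is rebuilt every REHASH_INTERVAL; since each new instance
 * draws a fresh random hash seed, this keeps the bucket distribution from
 * degrading permanently under pathological collisions.
 */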
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_node_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}