flow_table.c

/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
	const long *s = (long *)((u8 *)src + mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
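
/* Example (a sketch of the caller side; the local names are illustrative):
 * before a lookup, the search key is narrowed to the masked fields, exactly
 * as masked_flow_lookup() does further down in this file:
 *
 *	struct sw_flow_key masked;
 *
 *	ovs_flow_mask_key(&masked, unmasked_key, mask);
 *	hash = flow_hash(&masked, mask->range.start, mask->range.end);
 */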

struct sw_flow *ovs_flow_alloc(bool percpu_stats)
{
	struct sw_flow *flow;
	int cpu;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;

	flow->stats.is_percpu = percpu_stats;

	if (!percpu_stats) {
		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
		if (!flow->stats.stat)
			goto err;

		spin_lock_init(&flow->stats.stat->lock);
	} else {
		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
		if (!flow->stats.cpu_stats)
			goto err;

		for_each_possible_cpu(cpu) {
			struct flow_stats *cpu_stats;

			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
			spin_lock_init(&cpu_stats->lock);
		}
	}
	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
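
/* The caller picks the stats layout per flow: per-CPU stats cost one
 * struct flow_stats per possible CPU but avoid cross-CPU contention on
 * hot flows, while the shared variant keeps a single spinlock-protected
 * block.
 */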

int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
				flex_array_get(buckets, i));

	return buckets;
}
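
/* A flex_array is used instead of a plain kmalloc() so that a large
 * bucket array never needs a single high-order contiguous allocation;
 * each bucket is still reached in O(1) via flex_array_get().
 */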

static void flow_free(struct sw_flow *flow)
{
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	if (flow->stats.is_percpu)
		free_percpu(flow->stats.cpu_stats);
	else
		kfree(flow->stats.stat);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (flow->mask) {
		struct sw_flow_mask *mask = flow->mask;

		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			if (deferred)
				kfree_rcu(mask, rcu);
			else
				kfree(mask);
		}
	}

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
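
/* 'deferred' freeing goes through call_rcu()/kfree_rcu(), so concurrent
 * RCU readers still traversing the table can never see a flow or mask
 * being freed underneath them.
 */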

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
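
/* Every table instance draws a fresh random hash_seed, so bucket
 * placement is re-randomized on each rehash (see find_bucket() below).
 */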

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	table_instance_destroy(ti, deferred);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
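
/* Sketch of a dump loop (an assumed caller, e.g. a netlink dump handler;
 * 'bucket' and 'last' persist across calls so the walk can resume, and
 * output_flow() is a hypothetical consumer):
 *
 *	u32 bucket = 0, last = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		output_flow(flow);
 */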

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
			      (hash & (ti->n_buckets - 1)));
}
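
/* 'hash & (n_buckets - 1)' relies on n_buckets being a power of two:
 * TBL_MIN_BUCKETS is 1024 and table_instance_expand() only ever doubles
 * the bucket count.
 */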

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
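
/* Each flow carries two hash_node links.  A rehash links every flow into
 * the new instance under '!old_ver' while RCU readers still traverse the
 * old instance under 'old_ver'; setting keep_flows tells
 * table_instance_destroy() that the flows now belong to the new instance
 * and must not be freed along with the old one.
 */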

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	u32 *hash_key = (u32 *)((u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hashed bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (long *)((u8 *)key1 + key_start);
	const long *cp2 = (long *)((u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}
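
/* Comparing flow->mask and flow->hash first is a cheap filter; the full
 * word-by-word key comparison only runs on hash collisions within the
 * same mask.
 */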

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
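
/* A single miss-path lookup may probe the table once per mask
 * ("megaflow" lookup).  n_mask_hit counts how many masks were tried
 * before a hit (or all of them on a miss); the datapath accumulates it
 * into stats that let userspace judge megaflow efficiency.
 */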

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	u8 *a_ = (u8 *)&a->key + a->range.start;
	u8 *b_ = (u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
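
/* Masks are reference-counted and shared: many flows typically match on
 * the same set of fields, so the mask list stays far shorter than the
 * flow table itself, keeping the per-mask lookup loop cheap.
 */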

int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}
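
/* Expansion doubles the bucket count once the flow count exceeds it
 * (load factor > 1); the periodic same-size rehash re-seeds the hash to
 * break up accidental or adversarial bucket clustering.  A typical
 * insertion path looks roughly like this (a sketch; 'dp' and the error
 * handling are assumed, not taken from this file):
 *
 *	err = ovs_flow_tbl_insert(&dp->table, flow, &mask);
 *	if (err)
 *		ovs_flow_free(flow, false);
 */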

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}