flow_table.c

/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
{
        const long *m = (const long *)((const u8 *)&mask->key +
                                       mask->range.start);
        const long *s = (const long *)((const u8 *)src +
                                       mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;

        /* The memory outside of 'mask->range' is not set, since
         * further operations on 'dst' only use contents within
         * 'mask->range'.
         */
        for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
                *d++ = *s++ & *m++;
}
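
/* To illustrate the word-wise AND above with a hypothetical 4-byte slice
 * of the masked range (an IPv4 address under a /24 mask):
 *
 *      src:  0a 00 00 01       (10.0.0.1)
 *      mask: ff ff ff 00       (255.255.255.0)
 *      dst:  0a 00 00 00       (10.0.0.0)
 *
 * Bytes of 'dst' outside 'mask->range' stay uninitialized, so every later
 * reader of 'dst' (hashing, comparison) must confine itself to that range.
 */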

struct sw_flow *ovs_flow_alloc(bool percpu_stats)
{
        struct sw_flow *flow;
        int cpu;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;

        flow->stats.is_percpu = percpu_stats;

        if (!percpu_stats) {
                flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
                if (!flow->stats.stat)
                        goto err;

                spin_lock_init(&flow->stats.stat->lock);
        } else {
                flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
                if (!flow->stats.cpu_stats)
                        goto err;

                for_each_possible_cpu(cpu) {
                        struct flow_stats *cpu_stats;

                        cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
                        spin_lock_init(&cpu_stats->lock);
                }
        }
        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}
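
/* A minimal caller sketch (hypothetical; the real callers live in
 * datapath.c):
 *
 *      struct sw_flow *flow = ovs_flow_alloc(true);
 *
 *      if (IS_ERR(flow))
 *              return PTR_ERR(flow);
 *
 * Note the ERR_PTR()/IS_ERR() convention: on failure this returns an
 * encoded errno, never NULL, so callers must not test with '!flow'.
 */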

int ovs_flow_tbl_count(struct flow_table *table)
{
        return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void flow_free(struct sw_flow *flow)
{
        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        if (flow->stats.is_percpu)
                free_percpu(flow->stats.cpu_stats);
        else
                kfree(flow->stats.stat);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (flow->mask) {
                struct sw_flow_mask *mask = flow->mask;

                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        if (deferred)
                                kfree_rcu(mask, rcu);
                        else
                                kfree(mask);
                }
        }

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}
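
/* 'deferred' selects between immediate and RCU-deferred reclamation. A
 * sketch of the rule of thumb (an assumption drawn from the callers, not
 * a stated contract):
 *
 *      ovs_flow_free(flow, true);   // flow was visible to RCU readers
 *      ovs_flow_free(flow, false);  // flow was never published, e.g. an
 *                                   // error path before insertion
 *
 * Deferring via call_rcu() lets concurrent lookups holding rcu_read_lock()
 * finish with the flow before its memory is returned to the cache.
 */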

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);

        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);

        if (!ti)
                return -ENOMEM;

        rcu_assign_pointer(table->ti, ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
        int i;

        if (!ti)
                return;

        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred)
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        else
                __table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        table_instance_destroy(ti, deferred);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
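
/* A minimal iteration sketch (hypothetical caller; the real consumer is
 * the flow dump path in datapath.c). The (*bucket, *last) pair is a
 * resumable cursor, so a dump can be split across several calls:
 *
 *      u32 bucket = 0, last = 0;
 *      struct sw_flow *flow;
 *
 *      while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *              emit_flow(flow);        // hypothetical helper
 *
 * Resuming with a saved cursor continues where the previous call stopped,
 * at the cost of re-skipping the first *last entries of that bucket.
 */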

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                              (hash & (ti->n_buckets - 1)));
}
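
/* 'n_buckets' is always a power of two (TBL_MIN_BUCKETS, doubled on each
 * expansion), so 'hash & (n_buckets - 1)' is equivalent to
 * 'hash % n_buckets' without a division. Mixing in the per-instance
 * 'hash_seed' via jhash_1word() also makes bucket placement differ from
 * one table instance to the next, which re-spreads entries on rehash.
 */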

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, head, hash_node[old_ver])
                        table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti);

        return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti;
        struct table_instance *new_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;

        rcu_assign_pointer(flow_table->ti, new_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;

        table_instance_destroy(old_ti, true);
        return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
{
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return arch_fast_hash2(hash_key, hash_u32s, 0);
}
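
/* Only the bytes inside [key_start, key_end) contribute to the hash, so
 * two keys that agree on the masked range hash identically even if they
 * differ elsewhere. A sketch of the invariant this relies on (inferred
 * from the callers, not separately documented):
 *
 *      ovs_flow_mask_key(&masked, unmasked, mask);
 *      hash = flow_hash(&masked, mask->range.start, mask->range.end);
 *
 * i.e. hashing always happens on a key already masked over the same range.
 */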

static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}
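
/* cmp_key() compares a machine word at a time and accumulates all
 * differences into 'diffs' instead of returning early. Logically it is
 * equivalent to this simpler sketch:
 *
 *      return memcmp((const u8 *)key1 + key_start,
 *                    (const u8 *)key2 + key_start,
 *                    key_end - key_start) == 0;
 *
 * but the word-wise XOR/OR form avoids a byte loop. It relies on
 * sw_flow_key being long-aligned and padded to a multiple of sizeof(long),
 * which the BUILD_BUG_ON() checks in ovs_flow_init() below enforce.
 */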

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                int key_start, int key_end)
{
        return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int key_start = mask->range.start;
        int key_end = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, mask);
        hash = flow_hash(&masked_key, key_start, key_end);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
                if (flow->mask == mask && flow->hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key,
                                        key_start, key_end))
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}
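
/* Lookup cost is linear in the number of masks tried before a hit, which
 * is what '*n_mask_hit' reports back. A minimal caller sketch
 * (hypothetical; the real fast path is in the datapath receive code),
 * assuming 'key' was already extracted from a packet:
 *
 *      u32 n_mask_hit;
 *      struct sw_flow *flow;
 *
 *      rcu_read_lock();
 *      flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
 *      if (flow)
 *              ...     // apply flow->sf_acts
 *      rcu_read_unlock();
 */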

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
        return table_instance_rehash(ti, ti->n_buckets * 2);
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[ti->node_ver]);
        table->count--;
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;
                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}
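
/* Masks are deduplicated: flows with identical masks share a single
 * sw_flow_mask and bump its ref_count, so the mask list stays short even
 * with many flows, keeping the per-mask lookup loop in
 * ovs_flow_tbl_lookup_stats() cheap. The matching teardown is in
 * ovs_flow_free() above, which drops the refcount and unlinks the mask
 * once it reaches zero.
 */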

int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        struct sw_flow_mask *mask)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;

        flow->hash = flow_hash(&flow->key, flow->mask->range.start,
                               flow->mask->range.end);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                table_instance_destroy(ti, true);
                table->last_rehash = jiffies;
        }
        return 0;
}
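
/* Two triggers can replace the table instance here (a summary of the
 * logic above, not an additional contract):
 *
 *      - growth: count > n_buckets, expand to 2 * n_buckets;
 *      - aging:  REHASH_INTERVAL (10 minutes) elapsed, rehash at the
 *                same size but with a fresh hash_seed.
 *
 * The periodic reseed plausibly limits how long anyone who has inferred
 * the seed can keep packing flows into a single bucket. The old instance
 * is always destroyed deferred, since RCU readers may still be walking
 * it; its 'keep_flows' flag (set by flow_table_copy_flows()) prevents the
 * copied flows themselves from being freed along with it.
 */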

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                       0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}