vport.c

/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

static void ovs_vport_record_error(struct vport *,
                                   enum vport_err_type err_type);

/* List of statically compiled vport implementations. Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
        &ovs_netdev_vport_ops,
        &ovs_internal_vport_ops,

#ifdef CONFIG_OPENVSWITCH_GRE
        &ovs_gre_vport_ops,
#endif
#ifdef CONFIG_OPENVSWITCH_VXLAN
        &ovs_vxlan_vport_ops,
#endif
#ifdef CONFIG_OPENVSWITCH_GENEVE
        &ovs_geneve_vport_ops,
#endif
};
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
        dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dev_table)
                return -ENOMEM;

        return 0;
}
/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
        kfree(dev_table);
}

static struct hlist_head *hash_bucket(struct net *net, const char *name)
{
        unsigned int hash = jhash(name, strlen(name), (unsigned long) net);

        return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace in which to look up the port
 * @name: name of port to find
 *
 * Must be called with ovs or RCU read lock.
 */
struct vport *ovs_vport_locate(struct net *net, const char *name)
{
        struct hlist_head *bucket = hash_bucket(net, name);
        struct vport *vport;

        hlist_for_each_entry_rcu(vport, bucket, hash_node)
                if (!strcmp(name, vport->ops->get_name(vport)) &&
                    net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;

        return NULL;
}
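
/*
 * Illustrative sketch (not part of the original file): looking up a port by
 * name under the RCU read lock, as the comment above requires.  The caller,
 * the device name "example0" and the returned value are hypothetical; only
 * ovs_vport_locate() comes from this file.
 *
 *      static int example_lookup_port_no(struct net *net)
 *      {
 *              struct vport *vport;
 *              int port_no = -1;
 *
 *              rcu_read_lock();
 *              vport = ovs_vport_locate(net, "example0");
 *              if (vport)
 *                      port_no = vport->port_no;
 *              rcu_read_unlock();
 *
 *              return port_no;
 *      }
 */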
/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about the new vport
 *
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). vports that are no longer needed should be released with
 * vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
                              const struct vport_parms *parms)
{
        struct vport *vport;
        size_t alloc_size;

        alloc_size = sizeof(struct vport);
        if (priv_size) {
                alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
                alloc_size += priv_size;
        }

        vport = kzalloc(alloc_size, GFP_KERNEL);
        if (!vport)
                return ERR_PTR(-ENOMEM);

        vport->dp = parms->dp;
        vport->port_no = parms->port_no;
        vport->ops = ops;
        INIT_HLIST_NODE(&vport->dp_hash_node);

        if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
                kfree(vport);
                return ERR_PTR(-EINVAL);
        }

        vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!vport->percpu_stats) {
                kfree(vport);
                return ERR_PTR(-ENOMEM);
        }

        return vport;
}
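
/*
 * Illustrative sketch (not part of the original file): how a vport
 * implementation's ->create() callback would typically use ovs_vport_alloc()
 * together with the vport_priv() accessor from vport.h.  The private struct
 * "example_port", the ops table "example_vport_ops" and the chosen port
 * number are assumptions made only for this sketch.
 *
 *      struct example_port {
 *              __be16 dst_port;
 *      };
 *
 *      static struct vport *example_create(const struct vport_parms *parms)
 *      {
 *              struct example_port *ep;
 *              struct vport *vport;
 *
 *              vport = ovs_vport_alloc(sizeof(struct example_port),
 *                                      &example_vport_ops, parms);
 *              if (IS_ERR(vport))
 *                      return vport;
 *
 *              ep = vport_priv(vport);
 *              ep->dst_port = htons(4789);
 *
 *              return vport;
 *      }
 */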
/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
        /* The vport is being freed from an RCU callback or an error path,
         * so it is safe to use a raw dereference here.
         */
        kfree(rcu_dereference_raw(vport->upcall_portids));
        free_percpu(vport->percpu_stats);
        kfree(vport);
}
/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
        struct vport *vport;
        int err = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
                if (vport_ops_list[i]->type == parms->type) {
                        struct hlist_head *bucket;

                        vport = vport_ops_list[i]->create(parms);
                        if (IS_ERR(vport)) {
                                err = PTR_ERR(vport);
                                goto out;
                        }

                        bucket = hash_bucket(ovs_dp_get_net(vport->dp),
                                             vport->ops->get_name(vport));
                        hlist_add_head_rcu(&vport->hash_node, bucket);
                        return vport;
                }
        }

        err = -EAFNOSUPPORT;

out:
        return ERR_PTR(err);
}
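
/*
 * Illustrative sketch (not part of the original file): a kernel caller
 * creating a new internal port.  The field list of struct vport_parms and
 * the ovs_lock()/ovs_unlock() helpers come from datapath.h; the wrapper
 * function, port name and parameters are assumptions for the sketch, and
 * callers that already hold ovs_mutex would skip the locking.
 *
 *      static struct vport *example_add_port(struct datapath *dp, u16 port_no,
 *                                            struct nlattr *upcall_pids)
 *      {
 *              struct vport_parms parms = {
 *                      .name = "example0",
 *                      .type = OVS_VPORT_TYPE_INTERNAL,
 *                      .options = NULL,
 *                      .dp = dp,
 *                      .port_no = port_no,
 *                      .upcall_portids = upcall_pids,
 *              };
 *              struct vport *vport;
 *
 *              ovs_lock();
 *              vport = ovs_vport_add(&parms);
 *              ovs_unlock();
 *
 *              return vport;
 *      }
 */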
/**
 * ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). ovs_mutex must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
        if (!vport->ops->set_options)
                return -EOPNOTSUPP;
        return vport->ops->set_options(vport, options);
}
/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. It is possible to fail
 * for reasons such as lack of memory. ovs_mutex must be held.
 */
void ovs_vport_del(struct vport *vport)
{
        ASSERT_OVSL();

        hlist_del_rcu(&vport->hash_node);

        vport->ops->destroy(vport);
}
/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
        int i;

        memset(stats, 0, sizeof(*stats));

        /* We potentially have two sources of stats that need to be combined:
         * those we have collected ourselves (split into err_stats and
         * percpu_stats) and device error stats from netdev->get_stats()
         * (for errors that happen downstream and therefore aren't reported
         * through our vport_record_error() function).
         * Stats from the first source are reported by OVS
         * (OVS_VPORT_ATTR_STATS); the netdev stats can be read directly over
         * netlink or ioctl.
         */
        stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors);
        stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors);
        stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
        stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);

        for_each_possible_cpu(i) {
                const struct pcpu_sw_netstats *percpu_stats;
                struct pcpu_sw_netstats local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

                do {
                        start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

                stats->rx_bytes += local_stats.rx_bytes;
                stats->rx_packets += local_stats.rx_packets;
                stats->tx_bytes += local_stats.tx_bytes;
                stats->tx_packets += local_stats.tx_packets;
        }
}
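
/*
 * Illustrative sketch (not part of the original file): reading the combined
 * counters under the RCU read lock, as the comment above requires.  The
 * wrapper function is hypothetical; struct ovs_vport_stats is the uapi
 * structure this helper fills in.
 *
 *      static u64 example_rx_packets(struct vport *vport)
 *      {
 *              struct ovs_vport_stats stats;
 *
 *              rcu_read_lock();
 *              ovs_vport_get_stats(vport, &stats);
 *              rcu_read_unlock();
 *
 *              return stats.rx_packets;
 *      }
 */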
/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred. If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
        struct nlattr *nla;
        int err;

        if (!vport->ops->get_options)
                return 0;

        nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
        if (!nla)
                return -EMSGSIZE;

        err = vport->ops->get_options(vport, skb);
        if (err) {
                nla_nest_cancel(skb, nla);
                return err;
        }

        nla_nest_end(skb, nla);
        return 0;
}
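
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * ->get_options() callback that this helper wraps.  The callback only
 * appends its own attributes; the enclosing OVS_VPORT_ATTR_OPTIONS nest is
 * handled by ovs_vport_get_options() above.  The example_port private
 * struct is the hypothetical one from the earlier sketch, and the choice of
 * OVS_TUNNEL_ATTR_DST_PORT as the nested attribute is an assumption.
 *
 *      static int example_get_options(const struct vport *vport,
 *                                     struct sk_buff *skb)
 *      {
 *              struct example_port *ep = vport_priv(vport);
 *
 *              if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT,
 *                              ntohs(ep->dst_port)))
 *                      return -EMSGSIZE;
 *              return 0;
 *      }
 */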
/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids)
{
        struct vport_portids *old, *vport_portids;

        if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
                return -EINVAL;

        old = ovsl_dereference(vport->upcall_portids);

        vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
                                GFP_KERNEL);
        if (!vport_portids)
                return -ENOMEM;

        vport_portids->n_ids = nla_len(ids) / sizeof(u32);
        vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
        nla_memcpy(vport_portids->ids, ids, nla_len(ids));

        rcu_assign_pointer(vport->upcall_portids, vport_portids);

        if (old)
                kfree_rcu(old, rcu);
        return 0;
}
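
/*
 * Worked example for the validation above: an OVS_VPORT_ATTR_UPCALL_PID
 * attribute carrying three portids has nla_len(ids) == 3 * sizeof(u32) == 12,
 * so n_ids becomes 3.  A payload of, say, 10 bytes fails the
 * "% sizeof(u32)" check and is rejected with -EINVAL.
 */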
/**
 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified. Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
                                 struct sk_buff *skb)
{
        struct vport_portids *ids;

        ids = rcu_dereference_ovsl(vport->upcall_portids);

        if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
                return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
                               ids->n_ids * sizeof(u32), (void *)ids->ids);
        else
                return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}
/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb on which the missed packet was received.
 *
 * Uses the flow hash from skb_get_hash() to select the upcall portid to
 * send the upcall to.
 *
 * Returns the portid of the target socket. Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
{
        struct vport_portids *ids;
        u32 ids_index;
        u32 hash;

        ids = rcu_dereference(vport->upcall_portids);

        if (ids->n_ids == 1 && ids->ids[0] == 0)
                return 0;

        hash = skb_get_hash(skb);
        ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
        return ids->ids[ids_index];
}
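
/*
 * Worked example for the index computation above: with n_ids == 4 and a flow
 * hash of 42, reciprocal_divide(hash, rn_ids) evaluates to hash / n_ids == 10,
 * so ids_index == 42 - 4 * 10 == 2, i.e. hash % n_ids.  The reciprocal form
 * simply avoids a runtime division in this fast path.
 */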
/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel info (if any) that carried the packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
                       struct ovs_tunnel_info *tun_info)
{
        struct pcpu_sw_netstats *stats;
        struct sw_flow_key key;
        int error;

        stats = this_cpu_ptr(vport->percpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);

        OVS_CB(skb)->input_vport = vport;
        OVS_CB(skb)->egress_tun_info = NULL;

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_key_extract(tun_info, skb, &key);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }
        ovs_dp_process_packet(skb, &key);
}
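
/*
 * Illustrative sketch (not part of the original file): how a receive path
 * typically hands a packet to the datapath.  A plain netdev-backed port has
 * no tunnel metadata and passes NULL; a tunnel port would pass a populated
 * struct ovs_tunnel_info instead.  The wrapper name is hypothetical, the
 * caller is assumed to have set skb->data to the Ethernet header, and the
 * explicit rcu_read_lock() only makes the documented requirement visible
 * (receive softirq context normally already provides it).
 *
 *      static void example_rcv(struct vport *vport, struct sk_buff *skb)
 *      {
 *              rcu_read_lock();
 *              ovs_vport_receive(vport, skb, NULL);
 *              rcu_read_unlock();
 *      }
 */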
/**
 * ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent. Either ovs
 * lock or rcu_read_lock must be held.
 */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
        int sent = vport->ops->send(vport, skb);

        if (likely(sent > 0)) {
                struct pcpu_sw_netstats *stats;

                stats = this_cpu_ptr(vport->percpu_stats);

                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += sent;
                u64_stats_update_end(&stats->syncp);
        } else if (sent < 0) {
                ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
                kfree_skb(skb);
        } else {
                ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
        }

        return sent;
}
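
/*
 * Illustrative sketch (not part of the original file): the return-value
 * contract that ovs_vport_send() expects from a ->send() callback, as can be
 * read from the branches above: a positive byte count on success, 0 for a
 * packet the callback already dropped (and freed) itself, and a negative
 * errno when the skb is left for ovs_vport_send() to free.  All helper names
 * are hypothetical.
 *
 *      static int example_send(struct vport *vport, struct sk_buff *skb)
 *      {
 *              int len = skb->len;
 *
 *              if (!example_dev_is_up(vport))
 *                      return -ENETDOWN;       (ovs_vport_send frees the skb)
 *
 *              if (len > example_mtu(vport)) {
 *                      kfree_skb(skb);         (dropped here, report 0)
 *                      return 0;
 *              }
 *
 *              example_xmit(skb);
 *              return len;
 *      }
 */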
/**
 * ovs_vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * If using the vport generic stats layer indicate that an error of the given
 * type has occurred.
 */
static void ovs_vport_record_error(struct vport *vport,
                                   enum vport_err_type err_type)
{
        switch (err_type) {
        case VPORT_E_RX_DROPPED:
                atomic_long_inc(&vport->err_stats.rx_dropped);
                break;

        case VPORT_E_RX_ERROR:
                atomic_long_inc(&vport->err_stats.rx_errors);
                break;

        case VPORT_E_TX_DROPPED:
                atomic_long_inc(&vport->err_stats.tx_dropped);
                break;

        case VPORT_E_TX_ERROR:
                atomic_long_inc(&vport->err_stats.tx_errors);
                break;
        }
}
static void free_vport_rcu(struct rcu_head *rcu)
{
        struct vport *vport = container_of(rcu, struct vport, rcu);

        ovs_vport_free(vport);
}

/* Free @vport after an RCU grace period, so that concurrent readers can
 * finish with it first. Safe to call with a NULL @vport.
 */
void ovs_vport_deferred_free(struct vport *vport)
{
        if (!vport)
                return;

        call_rcu(&vport->rcu, free_vport_rcu);
}