datapath.c

/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;

static void ovs_notify(struct genl_family *family,
                       struct sk_buff *skb, struct genl_info *info)
{
        genl_notify(family, skb, genl_info_net(info), info->snd_portid,
                    0, info->nlhdr, GFP_KERNEL);
}
/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath or port,
 * set operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}
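/*
 * Illustrative sketch (not part of the original file): per the locking
 * rules documented above, control-path handlers bracket datapath mutations
 * with ovs_lock()/ovs_unlock(), while the packet fast path runs under
 * rcu_read_lock() only.  The function below is hypothetical and kept out
 * of the build with #if 0.
 */
#if 0
static void example_dp_writer(struct datapath *dp)
{
        ovs_lock();     /* serialize against other control-path writers */
        /* ... mutate datapath state (ports, flow table, parameters) ... */
        ovs_unlock();   /* RCU readers run concurrently throughout */
}
#endif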
#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
static const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = netdev_vport_priv(local)->dev->ifindex;
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
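/*
 * Added note (not in the original): the mask above is a cheap modulo that
 * only works because DP_VPORT_HASH_BUCKETS is a power of two.  With the
 * usual value of 1024, port_no 1025 hashes to bucket 1025 & 1023 == 1.
 */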
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        struct sw_flow_key key;
        u64 *stats_counter;
        u32 n_mask_hit;
        int error;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->flow = flow;
        OVS_CB(skb)->pkt_key = &key;

        ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);
        stats_counter = &stats->n_hit;

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->syncp);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
        u64_stats_update_end(&stats->syncp);
}
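/*
 * Added note (not in the original): the fast path above feeds three of the
 * per-cpu counters.  A flow-table hit bumps n_hit, a miss bumps n_missed
 * after queueing an upcall to userspace, and n_lost is bumped by
 * ovs_dp_upcall() below when delivery of that upcall fails.
 */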
static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(dp, skb, upcall_info);
        else
                err = queue_gso_packets(dp, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->syncp);
        stats->n_lost++;
        u64_stats_update_end(&stats->syncp);

        return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(dp, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}
static size_t key_attr_size(void)
{
        return nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
                + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
                  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
                + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
                + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
                + nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
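/*
 * Added worked example (not in the original): each nla_total_size(n) is
 * the attribute header plus n payload bytes, rounded up to a 4-byte
 * boundary.  Assuming the usual NLA_HDRLEN of 4, the terms above come to
 * 8 + 4 + 12 + 8 + 8 + 8 + 8 + 4 + 4 + 8 + 8 + 16 + 8 + 8 + 4 + 8 + 44 +
 * 8 + 32 = 208 bytes for a worst-case serialized flow key.
 */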
static size_t upcall_msg_size(const struct nlattr *userdata,
                              unsigned int hdrlen)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

        /* OVS_PACKET_ATTR_USERDATA */
        if (userdata)
                size += NLA_ALIGN(userdata->nla_len);

        return size;
}

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        struct genl_info info = {
                .dst_sk = ovs_dp_get_net(dp)->genl_sock,
                .snd_portid = upcall_info->portid,
        };
        size_t len;
        unsigned int hlen;
        int err, dp_ifindex;

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex)
                return -ENODEV;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                nskb->vlan_tci = 0;
                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        /* Complete checksum if needed */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (err = skb_checksum_help(skb)))
                goto out;

        /* Older versions of OVS user space enforce alignment of the last
         * Netlink attribute to NLA_ALIGNTO which would require extensive
         * padding logic. Only perform zerocopy if padding is not required.
         */
        if (dp->user_features & OVS_DP_F_UNALIGNED)
                hlen = skb_zerocopy_headlen(skb);
        else
                hlen = skb->len;

        len = upcall_msg_size(upcall_info->userdata, hlen);
        user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        /* Only reserve room for attribute header, packet data is added
         * in skb_zerocopy() */
        if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
                err = -ENOBUFS;
                goto out;
        }
        nla->nla_len = nla_attr_size(skb->len);

        err = skb_zerocopy(user_skb, skb, skb->len, hlen);
        if (err)
                goto out;

        /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
        if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
                size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

                if (plen > 0)
                        memset(skb_put(user_skb, plen), 0, plen);
        }

        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
out:
        if (err)
                skb_tx_error(skb);
        kfree_skb(nskb);
        return err;
}
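/*
 * Added note (not in the original): skb_zerocopy(user_skb, skb, skb->len,
 * hlen) copies the first hlen bytes linearly and attaches the remainder as
 * page fragments.  So when userspace advertises OVS_DP_F_UNALIGNED only
 * the protocol headers are actually copied; otherwise hlen == skb->len
 * forces a full linear copy so the packet attribute can be padded out to
 * NLA_ALIGNTO, as done just above.
 */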
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc(false);
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key);
        if (err)
                goto err_flow_free;

        err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;
        acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;

        err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
                                   &flow->key, 0, &acts);
        rcu_assign_pointer(flow->sf_acts, acts);
        if (err)
                goto err_flow_free;

        OVS_CB(packet)->flow = flow;
        OVS_CB(packet)->pkt_key = &flow->key;
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow, false);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow, false);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
{
        int i;

        memset(mega_stats, 0, sizeof(*mega_stats));

        stats->n_flows = ovs_flow_tbl_count(&dp->table);
        mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;

        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
                mega_stats->n_mask_hit += local_stats.n_mask_hit;
        }
}
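/*
 * Added note (not in the original): the fetch_begin/fetch_retry loop above
 * is the usual u64_stats_sync pattern.  Each per-cpu struct is copied out,
 * and the copy is discarded and retried if the sequence count shows that a
 * writer updated the counters mid-copy; this keeps the folded 64-bit
 * totals consistent even on 32-bit SMP hosts where the loads are not
 * atomic.
 */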
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
        return NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
                + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        struct nlattr *start;
        struct ovs_flow_stats stats;
        __be16 tcp_flags;
        unsigned long used;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;

        err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
        if (!nla)
                goto nla_put_failure;

        err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if ((u8)ntohs(tcp_flags) &&
            nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'. This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them. (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_ovsl(flow->sf_acts);

                err = ovs_nla_put_actions(sf_acts->actions,
                                          sf_acts->actions_len, skb);
                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                goto error;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len)
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
                                               struct genl_info *info)
{
        size_t len;

        len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));

        return genlmsg_new_unicast(len, info, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               struct genl_info *info,
                                               u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow, info);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
                                        info->snd_seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}

static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key, masked_key;
        struct sw_flow *flow = NULL;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
        struct sw_flow_actions *acts = NULL;
        struct sw_flow_match match;
        bool exact_5tuple;
        int error;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;

        ovs_match_init(&match, &key, &mask);
        error = ovs_nla_get_match(&match, &exact_5tuple,
                                  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error;

                ovs_flow_mask_key(&masked_key, &key, &mask);
                error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
                                             &masked_key, 0, &acts);
                if (error) {
                        OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
                        goto err_kfree;
                }
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto err_unlock_ovs;

        /* Check if this is a duplicate flow */
        flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (!flow) {
                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto err_unlock_ovs;

                /* Allocate flow. */
                flow = ovs_flow_alloc(!exact_5tuple);
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto err_unlock_ovs;
                }

                flow->key = masked_key;
                flow->unmasked_key = key;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
                if (error) {
                        acts = NULL;
                        goto err_flow_free;
                }

                reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request. We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto err_unlock_ovs;

                /* The unmasked key has to be the same for flow updates. */
                if (!ovs_flow_cmp_unmasked_key(flow, &match))
                        goto err_unlock_ovs;

                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);
                ovs_nla_free_flow_actions(old_acts);

                reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR])
                        ovs_flow_stats_clear(flow);
        }
        ovs_unlock();

        if (!IS_ERR(reply))
                ovs_notify(&dp_flow_genl_family, reply, info);
        else
                genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
                             0, PTR_ERR(reply));
        return 0;

err_flow_free:
        ovs_flow_free(flow, false);
err_unlock_ovs:
        ovs_unlock();
err_kfree:
        kfree(acts);
error:
        return error;
}
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        int err;

        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
                return -EINVAL;
        }

        ovs_match_init(&match, &key, NULL);
        err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
        if (err)
                return err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        int err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        if (!a[OVS_FLOW_ATTR_KEY]) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }

        ovs_match_init(&match, &key, NULL);
        err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
        if (err)
                goto unlock;

        flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_alloc_info(flow, info);
        if (!reply) {
                err = -ENOMEM;
                goto unlock;
        }

        ovs_flow_tbl_remove(&dp->table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        ovs_flow_free(flow, true);
        ovs_unlock();

        ovs_notify(&dp_flow_genl_family, reply, info);
        return 0;
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct table_instance *ti;
        struct datapath *dp;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }

        ti = rcu_dereference(dp->table.ti);
        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, dp, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        rcu_read_unlock();
        return skb->len;
}
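/*
 * Added note (not in the original): Netlink invokes a dump callback
 * repeatedly, once per reply skb, until it produces no new data.  The
 * (bucket, obj) cursor stashed in cb->args[0] and cb->args[1] above
 * records how far the previous pass got through the flow table, so each
 * invocation resumes where the last full skb left off.
 */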
static const struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0, /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set,
        },
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
        .maxattr = OVS_DP_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};

static size_t ovs_dp_cmd_msg_size(void)
{
        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

        msgsize += nla_total_size(IFNAMSIZ);
        msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
        msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

        return msgsize;
}

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        struct ovs_dp_megaflow_stats dp_megaflow_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        rcu_read_lock();
        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        rcu_read_unlock();
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
        if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
                    &dp_stats))
                goto nla_put_failure;

        if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
                    sizeof(struct ovs_dp_megaflow_stats),
                    &dp_megaflow_stats))
                goto nla_put_failure;

        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
                                             struct genl_info *info, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                rcu_read_lock();
                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return;

        WARN(dp->user_features, "Dropping previously announced user features\n");
        dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
{
        if (a[OVS_DP_ATTR_USER_FEATURES])
                dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        struct ovs_net *ovs_net;
        int err, i;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        ovs_lock();

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock_ovs;

        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

        /* Allocate table. */
        err = ovs_flow_tbl_init(&dp->table);
        if (err)
                goto err_free_dp;

        dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
                err = -ENOMEM;
                goto err_destroy_percpu;
        }

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&dp->ports[i]);

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

        ovs_dp_change(dp, a);

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;

                if (err == -EEXIST) {
                        /* An outdated user space instance that does not understand
                         * the concept of user_features has attempted to create a new
                         * datapath and is likely to reuse it. Drop all user features.
                         */
                        if (info->genlhdr->version < OVS_DP_VER_FEATURES)
                                ovs_dp_reset_user_features(skb, info);
                }

                goto err_destroy_ports_array;
        }

        reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_destroy_local_port;

        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

        ovs_unlock();

        ovs_notify(&dp_datapath_genl_family, reply, info);
        return 0;

err_destroy_local_port:
        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
        kfree(dp->ports);
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(&dp->table, false);
err_free_dp:
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
err_unlock_ovs:
        ovs_unlock();
err:
        return err;
}
/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
        int i;

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }

        list_del_rcu(&dp->list_node);

        /* OVSP_LOCAL is the datapath's internal port.  We make sure that all
         * other ports in the datapath are destroyed first, before freeing
         * the datapath itself.
         */
        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

        /* RCU-destroy the flow table. */
        ovs_flow_tbl_destroy(&dp->table, true);

        call_rcu(&dp->rcu, destroy_dp_rcu);
}
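/*
 * Added note (not in the original): destroy_dp_rcu() runs only after an
 * RCU grace period has elapsed, so packet-path readers that looked the
 * datapath up before list_del_rcu() above can keep dereferencing dp->ports
 * and the per-cpu stats safely until they drop rcu_read_lock().
 */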
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto unlock;

        reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto unlock;

        __dp_destroy(dp);
        ovs_unlock();

        ovs_notify(&dp_datapath_genl_family, reply, info);

        return 0;
unlock:
        ovs_unlock();
        return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto unlock;

        ovs_dp_change(dp, info->attrs);

        reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
                             0, err);
                err = 0;
                goto unlock;
        }

        ovs_unlock();
        ovs_notify(&dp_datapath_genl_family, reply, info);

        return 0;
unlock:
        ovs_unlock();
        return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp)) {
                err = PTR_ERR(dp);
                goto unlock;
        }

        reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);

unlock:
        ovs_unlock();
        return err;
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                i++;
        }
        rcu_read_unlock();

        cb->args[0] = i;

        return skb->len;
}

static const struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0, /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};
/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
                goto nla_put_failure;

        ovs_vport_get_stats(vport, &vport_stats);
        if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
                    &vport_stats))
                goto nla_put_failure;

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);

        return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
                                  struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = ovs_vport_ovsl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}
  1307. static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
  1308. {
  1309. struct nlattr **a = info->attrs;
  1310. struct ovs_header *ovs_header = info->userhdr;
  1311. struct vport_parms parms;
  1312. struct sk_buff *reply;
  1313. struct vport *vport;
  1314. struct datapath *dp;
  1315. u32 port_no;
  1316. int err;
  1317. err = -EINVAL;
  1318. if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
  1319. !a[OVS_VPORT_ATTR_UPCALL_PID])
  1320. goto exit;
  1321. ovs_lock();
  1322. dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
  1323. err = -ENODEV;
  1324. if (!dp)
  1325. goto exit_unlock;
  1326. if (a[OVS_VPORT_ATTR_PORT_NO]) {
  1327. port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
  1328. err = -EFBIG;
  1329. if (port_no >= DP_MAX_PORTS)
  1330. goto exit_unlock;
  1331. vport = ovs_vport_ovsl(dp, port_no);
  1332. err = -EBUSY;
  1333. if (vport)
  1334. goto exit_unlock;
  1335. } else {
  1336. for (port_no = 1; ; port_no++) {
  1337. if (port_no >= DP_MAX_PORTS) {
  1338. err = -EFBIG;
  1339. goto exit_unlock;
  1340. }
  1341. vport = ovs_vport_ovsl(dp, port_no);
  1342. if (!vport)
  1343. break;
  1344. }
  1345. }
  1346. parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
  1347. parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
  1348. parms.options = a[OVS_VPORT_ATTR_OPTIONS];
  1349. parms.dp = dp;
  1350. parms.port_no = port_no;
  1351. parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
  1352. vport = new_vport(&parms);
  1353. err = PTR_ERR(vport);
  1354. if (IS_ERR(vport))
  1355. goto exit_unlock;
  1356. err = 0;
  1357. reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
  1358. OVS_VPORT_CMD_NEW);
  1359. if (IS_ERR(reply)) {
  1360. err = PTR_ERR(reply);
  1361. ovs_dp_detach_port(vport);
  1362. goto exit_unlock;
  1363. }
  1364. ovs_notify(&dp_vport_genl_family, reply, info);
  1365. exit_unlock:
  1366. ovs_unlock();
  1367. exit:
  1368. return err;
  1369. }
  1370. static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
  1371. {
  1372. struct nlattr **a = info->attrs;
  1373. struct sk_buff *reply;
  1374. struct vport *vport;
  1375. int err;
  1376. ovs_lock();
  1377. vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
  1378. err = PTR_ERR(vport);
  1379. if (IS_ERR(vport))
  1380. goto exit_unlock;
  1381. if (a[OVS_VPORT_ATTR_TYPE] &&
  1382. nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
  1383. err = -EINVAL;
  1384. goto exit_unlock;
  1385. }
  1386. reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1387. if (!reply) {
  1388. err = -ENOMEM;
  1389. goto exit_unlock;
  1390. }
  1391. if (a[OVS_VPORT_ATTR_OPTIONS]) {
  1392. err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
  1393. if (err)
  1394. goto exit_free;
  1395. }
  1396. if (a[OVS_VPORT_ATTR_UPCALL_PID])
  1397. vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
  1398. err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
  1399. info->snd_seq, 0, OVS_VPORT_CMD_NEW);
  1400. BUG_ON(err < 0);
  1401. ovs_unlock();
  1402. ovs_notify(&dp_vport_genl_family, reply, info);
  1403. return 0;
  1404. exit_free:
  1405. kfree_skb(reply);
  1406. exit_unlock:
  1407. ovs_unlock();
  1408. return err;
  1409. }
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
                                         info->snd_seq, OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = 0;
        ovs_dp_detach_port(vport);
        ovs_notify(&dp_vport_genl_family, reply, info);

exit_unlock:
        ovs_unlock();
        return err;
}
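
/*
 * OVS_VPORT_CMD_GET is read-only, so RCU protection suffices here;
 * unlike the NEW/SET/DEL handlers above, no ovs_lock() is taken.
 */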
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
                                         info->snd_seq, OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
        return err;
}
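
/*
 * Dump handler for OVS_VPORT_CMD_GET.  Netlink dumps are restartable:
 * cb->args[0] records the current hash bucket and cb->args[1] the number
 * of entries already emitted from it, so a dump that fills the skb picks
 * up where it left off on the next invocation.
 */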
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        int bucket = cb->args[0], skip = cb->args[1];
        int i, j = 0;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;

                j = 0;
                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
                                                    OVS_VPORT_CMD_NEW) < 0)
                                goto out;

                        j++;
                }
                skip = 0;
        }
out:
        rcu_read_unlock();

        cb->args[0] = i;
        cb->args[1] = j;

        return skb->len;
}
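
/*
 * Vport command dispatch table.  The mutating commands (NEW, SET, DEL)
 * carry GENL_ADMIN_PERM and therefore require CAP_NET_ADMIN; GET is open
 * to unprivileged users and also serves as the dump entry point.  As a
 * rough sketch (attribute values are illustrative, not prescriptive), a
 * userspace OVS_VPORT_CMD_NEW request carries:
 *
 *      struct ovs_header  .dp_ifindex = <ifindex of the target datapath>
 *      OVS_VPORT_ATTR_NAME        "vport1"
 *      OVS_VPORT_ATTR_TYPE        OVS_VPORT_TYPE_NETDEV
 *      OVS_VPORT_ATTR_UPCALL_PID  <netlink portid to receive upcalls>
 */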
static const struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};
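
/*
 * Bundles a generic netlink family with its ops table and optional
 * multicast group so that registration and teardown below can be
 * table-driven.
 */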
struct genl_family_and_ops {
        struct genl_family *family;
        const struct genl_ops *ops;
        int n_ops;
        const struct genl_multicast_group *group;
};
static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &ovs_dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &ovs_dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &ovs_dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};
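
/* Unregister the first @n_families entries of dp_genl_families. */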
static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}
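
/*
 * Register every family in dp_genl_families, wiring up its ops table and
 * multicast group first.  On failure, the families registered so far are
 * torn down again, making registration all-or-nothing.
 */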
static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                f->family->ops = f->ops;
                f->family->n_ops = f->n_ops;
                f->family->mcgrps = f->group;
                f->family->n_mcgrps = f->group ? 1 : 0;

                err = genl_register_family(f->family);
                if (err)
                        goto error;
                n_registered++;
        }

        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}
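
/*
 * Per-network-namespace state: each namespace keeps its own list of
 * datapaths plus the dp_notify_work item, which lets netdev notifier
 * events be handled asynchronously via ovs_dp_notify_wq.
 */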
static int __net_init ovs_init_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        INIT_LIST_HEAD(&ovs_net->dps);
        INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
        return 0;
}
static void __net_exit ovs_exit_net(struct net *net)
{
        struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        ovs_lock();
        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);
        ovs_unlock();

        cancel_work_sync(&ovs_net->dp_notify_work);
}
static struct pernet_operations ovs_net_ops = {
        .init = ovs_init_net,
        .exit = ovs_exit_net,
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};
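
/*
 * Module init.  Ordering matters here: the flow and vport subsystems
 * come up first, then per-netns state and the netdev notifier, and the
 * generic netlink interface is exposed to userspace only as the final
 * step.  The error labels unwind in exact reverse order.
 */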
static int __init dp_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

        pr_info("Open vSwitch switching datapath\n");

        err = ovs_flow_init();
        if (err)
                goto error;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_netns_exit;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
        unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error:
        return err;
}
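
/*
 * Module exit mirrors dp_init() in reverse.  The rcu_barrier() waits for
 * in-flight call_rcu() callbacks to complete before the flow and vport
 * caches they may still reference are destroyed.
 */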
static void dp_cleanup(void)
{
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        unregister_pernet_device(&ovs_net_ops);
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
}
module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");