act_api.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483
  1. /*
  2. * net/sched/act_api.c Packet action API.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Author: Jamal Hadi Salim
  10. *
  11. *
  12. */
  13. #include <linux/types.h>
  14. #include <linux/kernel.h>
  15. #include <linux/string.h>
  16. #include <linux/errno.h>
  17. #include <linux/slab.h>
  18. #include <linux/skbuff.h>
  19. #include <linux/init.h>
  20. #include <linux/kmod.h>
  21. #include <linux/err.h>
  22. #include <linux/module.h>
  23. #include <linux/rhashtable.h>
  24. #include <linux/list.h>
  25. #include <net/net_namespace.h>
  26. #include <net/sock.h>
  27. #include <net/sch_generic.h>
  28. #include <net/pkt_cls.h>
  29. #include <net/act_api.h>
  30. #include <net/netlink.h>
/* Resolve the chain index encoded in a TC_ACT_GOTO_CHAIN verdict into a
 * tcf_chain reference taken from the filter's block.
 * Returns 0 on success, -EINVAL when no filter context is available,
 * -ENOMEM when the chain reference cannot be obtained.
 */
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	/* low bits of tcfa_action carry the destination chain index */
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	/* third arg 'true' presumably requests creation of a missing
	 * chain — confirm against tcf_chain_get()
	 */
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}
/* Drop the chain reference taken by tcf_action_goto_chain_init(). */
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}
/* Redirect classification to the first filter of the destination chain.
 * Uses rcu_dereference_bh(), so callers run in a BH RCU read section.
 */
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later can not find us.
 */
static void free_tcf(struct tc_action *p)
{
	/* per-CPU stats are NULL when the action was created without
	 * cpustats; free_percpu(NULL) is a no-op
	 */
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	if (p->act_cookie) {
		kfree(p->act_cookie->data);
		kfree(p->act_cookie);
	}
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}
/* Unlink an action from its per-netns IDR, kill its rate estimator and
 * free it.  Final teardown path, reached via __tcf_idr_release().
 */
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
	spin_lock_bh(&idrinfo->lock);
	idr_remove(&idrinfo->action_idr, p->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}
/* Drop one reference on @p; when both the ref and bind counts reach
 * zero the action is cleaned up and removed from the IDR.
 *
 * @bind:   also drop a bind reference.
 * @strict: refuse (-EPERM) a non-bind release while still bound.
 *
 * Returns ACT_P_DELETED when the action was destroyed, 0 when only
 * counters were decremented, -EPERM on a strict violation.
 */
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	ASSERT_RTNL();

	if (p) {
		if (bind)
			p->tcfa_bindcnt--;
		else if (strict && p->tcfa_bindcnt > 0)
			return -EPERM;

		p->tcfa_refcnt--;
		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
			if (p->ops->cleanup)
				p->ops->cleanup(p);
			tcf_idr_remove(p->idrinfo, p);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
/* Netlink dump helper: emit actions from @idrinfo into @skb, resuming
 * at cb->args[0].  cb->args[2] carries dump flags; cb->args[3], when
 * non-zero, skips actions not used since that jiffies stamp.  Without
 * TCA_FLAG_LARGE_DUMP_ON at most TCA_ACT_MAX_PRIO entries are emitted.
 * Returns the number of actions dumped in this pass.
 */
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock_bh(&idrinfo->lock);

	s_i = cb->args[0];	/* resume point from the previous pass */

	idr_for_each_entry_ul(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			/* skb full: step back so this entry is retried
			 * on the next dump pass
			 */
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;	/* save resume point */

	spin_unlock_bh(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
/* Flush helper: release every action managed by @idrinfo, reporting the
 * kind (TCA_KIND) and deleted count (TCA_FCNT) in @skb.  Returns the
 * number of deleted actions, or a negative errno after cancelling the
 * partially built nest.
 */
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ul(idr, p, id) {
		/* strict release: returns -EPERM if still bound */
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}
  175. int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
  176. struct netlink_callback *cb, int type,
  177. const struct tc_action_ops *ops)
  178. {
  179. struct tcf_idrinfo *idrinfo = tn->idrinfo;
  180. if (type == RTM_DELACTION) {
  181. return tcf_del_walker(idrinfo, skb, ops);
  182. } else if (type == RTM_GETACTION) {
  183. return tcf_dump_walker(idrinfo, skb, cb);
  184. } else {
  185. WARN(1, "tcf_generic_walker: unknown action %d\n", type);
  186. return -EINVAL;
  187. }
  188. }
  189. EXPORT_SYMBOL(tcf_generic_walker);
  190. static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
  191. {
  192. struct tc_action *p = NULL;
  193. spin_lock_bh(&idrinfo->lock);
  194. p = idr_find(&idrinfo->action_idr, index);
  195. spin_unlock_bh(&idrinfo->lock);
  196. return p;
  197. }
  198. int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
  199. {
  200. struct tcf_idrinfo *idrinfo = tn->idrinfo;
  201. struct tc_action *p = tcf_idr_lookup(index, idrinfo);
  202. if (p) {
  203. *a = p;
  204. return 1;
  205. }
  206. return 0;
  207. }
  208. EXPORT_SYMBOL(tcf_idr_search);
/* Like tcf_idr_search() but grabs a reference (plus a bind reference
 * when @bind is set) on the found action.  index == 0 never matches.
 * Returns true when the action exists and was grabbed.
 */
bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
		   int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (index && p) {
		if (bind)
			p->tcfa_bindcnt++;
		p->tcfa_refcnt++;
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_check);
/* Destroy a half-initialized action (error path of an ->init() before
 * the action was published).  @est non-NULL indicates an estimator was
 * configured and must be killed before freeing.
 */
void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est)
{
	if (est)
		gen_kill_estimator(&a->tcfa_rate_est);
	free_tcf(a);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
/* Allocate a new action of @ops->size bytes and reserve its index in
 * the per-netns IDR.  A NULL placeholder is inserted at the index;
 * tcf_idr_insert() later replaces it to publish the action.
 *
 * @index:    requested index, or 0 to auto-allocate the lowest free one.
 * @est:      optional rate estimator configuration.
 * @bind:     start with a bind count of 1.
 * @cpustats: allocate per-CPU basic/queue statistics.
 *
 * Returns 0 and stores the action in *@a, or a negative errno with all
 * partial allocations unwound.
 */
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct idr *idr = &idrinfo->action_idr;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfa_refcnt = 1;
	if (bind)
		p->tcfa_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err2;
	}
	spin_lock_init(&p->tcfa_lock);
	/* preload so the GFP_ATOMIC allocation under the lock can't fail
	 * for lack of preallocated nodes
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&idrinfo->lock);
	/* user doesn't specify an index */
	if (!index) {
		index = 1;
		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
	} else {
		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
	}
	spin_unlock_bh(&idrinfo->lock);
	idr_preload_end();
	if (err)
		goto err3;

	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	INIT_LIST_HEAD(&p->list);
	*a = p;
	return 0;
err4:
	idr_remove(idr, index);
err3:
	free_percpu(p->cpu_qstats);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);
/* Publish a fully initialized action by replacing the NULL placeholder
 * reserved by tcf_idr_create(), making it visible to lookups.
 */
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	spin_lock_bh(&idrinfo->lock);
	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
/* Netns teardown: release every remaining action of this kind, then
 * destroy the IDR.  Bails out early on a release failure (e.g. an
 * action still bound), leaving the IDR intact.
 */
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
/* Registry of all loaded action kinds, protected by act_mod_lock. */
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Register a new action kind.  Rejects ops missing mandatory callbacks
 * (-EINVAL) and duplicate types or kind names (-EEXIST).
 */
int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			/* undo the pernet registration on the duplicate path */
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);
/* Remove an action kind from the registry and, on success, drop its
 * pernet ops.  Returns -ENOENT when @act was never registered.
 */
int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
  366. /* lookup by name */
  367. static struct tc_action_ops *tc_lookup_action_n(char *kind)
  368. {
  369. struct tc_action_ops *a, *res = NULL;
  370. if (kind) {
  371. read_lock(&act_mod_lock);
  372. list_for_each_entry(a, &act_base, head) {
  373. if (strcmp(kind, a->kind) == 0) {
  374. if (try_module_get(a->owner))
  375. res = a;
  376. break;
  377. }
  378. }
  379. read_unlock(&act_mod_lock);
  380. }
  381. return res;
  382. }
  383. /* lookup by nlattr */
  384. static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
  385. {
  386. struct tc_action_ops *a, *res = NULL;
  387. if (kind) {
  388. read_lock(&act_mod_lock);
  389. list_for_each_entry(a, &act_base, head) {
  390. if (nla_strcmp(kind, a->kind) == 0) {
  391. if (try_module_get(a->owner))
  392. res = a;
  393. break;
  394. }
  395. }
  396. read_unlock(&act_mod_lock);
  397. }
  398. return res;
  399. }
/* TCA_ACT_MAX_PRIO is 32; a TC_ACT_JUMP verdict encodes a skip count of
 * up to 32 actions in its low bits, extracted with this mask.
 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF

/* Run the pipeline of @nr_actions actions on @skb.  Handles
 * TC_ACT_REPEAT (re-run the same action), TC_ACT_JUMP (skip forward,
 * bounded by a TTL against jump loops) and TC_ACT_GOTO_CHAIN.  Returns
 * the verdict of the last executed action.
 */
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		/* still consuming actions skipped by a previous jump */
		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
/* Release every action on @actions.  Uses the _safe iterator because a
 * release may free the entry.  Stops at the first negative error;
 * otherwise returns the result of the last release.
 */
int tcf_action_destroy(struct list_head *actions, int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ops = a->ops;	/* save: 'a' may be freed by the release */
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}
/* Dump one action through its kind's ->dump() callback (legacy entry). */
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}
/* Dump a single action into @skb: kind, statistics, optional cookie,
 * and the kind-specific options nested under TCA_OPTIONS.  On any
 * failure the skb is trimmed back to its original tail and -1 is
 * returned; otherwise the (positive) ->dump() result is returned.
 */
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (a->act_cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
			    a->act_cookie->data))
			goto nla_put_failure;
	}
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}
	/* err <= 0 falls through to the failure path */

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
/* Dump a list of actions, each nested under its 1-based order index.
 * Returns 0 on success; on failure cancels the partial nest and returns
 * a negative error.
 */
int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
  513. static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
  514. {
  515. struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
  516. if (!c)
  517. return NULL;
  518. c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
  519. if (!c->data) {
  520. kfree(c);
  521. return NULL;
  522. }
  523. c->len = nla_len(tb[TCA_ACT_COOKIE]);
  524. return c;
  525. }
/* Create (or look up and possibly replace) one action from a netlink
 * request.
 *
 * @name == NULL: @nla is a nested attribute block carrying the kind,
 * options and optional cookie.  Otherwise @name is the kind and @nla is
 * handed to the kind's ->init() unchanged (backward compatibility for
 * the policer).
 *
 * May drop and re-take RTNL to load the action module; in that case
 * ERR_PTR(-EAGAIN) tells the caller to replay the request.  Returns the
 * action or an ERR_PTR().
 */
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    struct netlink_ext_ack *extack)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE)
				goto err_out;

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind);
	if (err < 0)
		goto err_mod;

	if (name == NULL && tb[TCA_ACT_COOKIE]) {
		/* free any cookie left on an action being replaced */
		if (a->act_cookie) {
			kfree(a->act_cookie->data);
			kfree(a->act_cookie);
		}
		a->act_cookie = cookie;
	}

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			LIST_HEAD(actions);

			list_add_tail(&a->list, &actions);
			tcf_action_destroy(&actions, bind);
			return ERR_PTR(err);
		}
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}
/* Drop the temporary refcount that tcf_action_init() took while
 * replacing existing actions; only taken when @ovr was set.
 */
static void cleanup_a(struct list_head *actions, int ovr)
{
	struct tc_action *a;

	if (!ovr)
		return;

	list_for_each_entry(a, actions, list)
		a->tcfa_refcnt--;
}
/* Parse up to TCA_ACT_MAX_PRIO actions from @nla and append them, in
 * priority order, to @actions.  On any error every action created so
 * far is destroyed.  Returns 0 or a negative errno.
 */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct list_head *actions, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		/* extra ref keeps a replaced action alive until all
		 * requested actions have been initialized
		 */
		if (ovr)
			act->tcfa_refcnt++;
		list_add_tail(&act->list, actions);
	}

	/* Remove the temp refcnt which was necessary to protect against
	 * destroying an existing action which was being replaced
	 */
	cleanup_a(actions, ovr);
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
/* Append the action's statistics (basic, rate estimator, queue) to
 * @skb.  @compat_mode selects the legacy TCA_STATS/TCA_XSTATS layout
 * for TCA_OLD_COMPAT actions.  Returns 0 on success, -1 on failure.
 */
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;	/* nothing to add for other types */
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
/* Build a tcamsg netlink message with the dump of @actions nested under
 * TCA_ACT_TAB.  Returns skb->len on success, or -1 after trimming the
 * partial message from @skb.
 */
static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}
/* Answer an RTM_GETACTION request by unicasting a dump of @actions to
 * the requesting socket.
 */
static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}
/* Resolve one nested action attribute (kind + index) to an existing
 * action via the kind's ->lookup().  The module reference taken by the
 * kind lookup is always dropped before returning.  Returns the action
 * or ERR_PTR(-EINVAL/-ENOENT).
 */
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) /* could happen in batch of actions */
		goto err_out;
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0)
		goto err_mod;

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
  771. static int tca_action_flush(struct net *net, struct nlattr *nla,
  772. struct nlmsghdr *n, u32 portid)
  773. {
  774. struct sk_buff *skb;
  775. unsigned char *b;
  776. struct nlmsghdr *nlh;
  777. struct tcamsg *t;
  778. struct netlink_callback dcb;
  779. struct nlattr *nest;
  780. struct nlattr *tb[TCA_ACT_MAX + 1];
  781. const struct tc_action_ops *ops;
  782. struct nlattr *kind;
  783. int err = -ENOMEM;
  784. skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
  785. if (!skb) {
  786. pr_debug("tca_action_flush: failed skb alloc\n");
  787. return err;
  788. }
  789. b = skb_tail_pointer(skb);
  790. err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
  791. if (err < 0)
  792. goto err_out;
  793. err = -EINVAL;
  794. kind = tb[TCA_ACT_KIND];
  795. ops = tc_lookup_action(kind);
  796. if (!ops) /*some idjot trying to flush unknown action */
  797. goto err_out;
  798. nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
  799. sizeof(*t), 0);
  800. if (!nlh)
  801. goto out_module_put;
  802. t = nlmsg_data(nlh);
  803. t->tca_family = AF_UNSPEC;
  804. t->tca__pad1 = 0;
  805. t->tca__pad2 = 0;
  806. nest = nla_nest_start(skb, TCA_ACT_TAB);
  807. if (!nest)
  808. goto out_module_put;
  809. err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
  810. if (err <= 0) {
  811. nla_nest_cancel(skb, nest);
  812. goto out_module_put;
  813. }
  814. nla_nest_end(skb, nest);
  815. nlh->nlmsg_len = skb_tail_pointer(skb) - b;
  816. nlh->nlmsg_flags |= NLM_F_ROOT;
  817. module_put(ops->owner);
  818. err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
  819. n->nlmsg_flags & NLM_F_ECHO);
  820. if (err > 0)
  821. return 0;
  822. return err;
  823. out_module_put:
  824. module_put(ops->owner);
  825. err_out:
  826. kfree_skb(skb);
  827. return err;
  828. }
/* Build the RTM_DELACTION notification first, while the actions are
 * still alive and dumpable, then destroy them and broadcast the
 * message on RTNLGRP_TC.
 */
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}
/* Handle RTM_GETACTION/RTM_DELACTION: collect the actions referenced by
 * @nla (or dispatch a whole-kind flush when NLM_F_ROOT is set on a
 * delete) and emit the corresponding notification.
 */
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid);
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	/* NOTE(review): the GET error path skips destroy — presumably
	 * the lookup took no droppable reference; confirm against the
	 * per-kind ->lookup() implementations.
	 */
	if (event != RTM_GETACTION)
		tcf_action_destroy(&actions, 0);
	return ret;
}
  893. static int
  894. tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
  895. u32 portid)
  896. {
  897. struct sk_buff *skb;
  898. int err = 0;
  899. skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
  900. if (!skb)
  901. return -ENOBUFS;
  902. if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
  903. RTM_NEWACTION, 0, 0) <= 0) {
  904. kfree_skb(skb);
  905. return -EINVAL;
  906. }
  907. err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
  908. n->nlmsg_flags & NLM_F_ECHO);
  909. if (err > 0)
  910. err = 0;
  911. return err;
  912. }
  913. static int tcf_action_add(struct net *net, struct nlattr *nla,
  914. struct nlmsghdr *n, u32 portid, int ovr,
  915. struct netlink_ext_ack *extack)
  916. {
  917. int ret = 0;
  918. LIST_HEAD(actions);
  919. ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions,
  920. extack);
  921. if (ret)
  922. return ret;
  923. return tcf_add_notify(net, n, &actions, portid);
  924. }
/* Only the LARGE_DUMP_ON bit may be set by userspace in TCA_ROOT_FLAGS. */
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;

/* Validation policy for the root-level attributes of action requests. */
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
			     .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
/* Top-level rtnetlink handler for RTM_NEWACTION / RTM_DELACTION /
 * RTM_GETACTION.  Returns 0 on success or a negative errno.
 */
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	/* Only RTM_GETACTION is allowed without CAP_NET_ADMIN. */
	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist
		 * Note that CREATE | EXCL implies that
		 * but since we want avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		/* -EAGAIN presumably means the action module was just
		 * auto-loaded and the add must be retried from scratch —
		 * TODO confirm against tcf_action_init().
		 */
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}
  979. static struct nlattr *find_dump_kind(struct nlattr **nla)
  980. {
  981. struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
  982. struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
  983. struct nlattr *kind;
  984. tb1 = nla[TCA_ACT_TAB];
  985. if (tb1 == NULL)
  986. return NULL;
  987. if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
  988. NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
  989. return NULL;
  990. if (tb[1] == NULL)
  991. return NULL;
  992. if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
  993. return NULL;
  994. kind = tb2[TCA_ACT_KIND];
  995. return kind;
  996. }
/* Netlink dump handler for RTM_GETACTION: walk all actions of the kind
 * named in the request and serialize them into @skb.
 */
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
			  tcaa_policy, NULL);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	/* Holds a reference on the ops' module from here on; every exit
	 * path below must drop it (see module_put() calls).
	 */
	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	/* cb->args[2] hands the requested root flags to the walker. */
	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	/* Translate the "changed within the last N ms" filter into an
	 * absolute jiffies threshold for the walker (via cb->args[3]).
	 */
	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;

	/* Reserve space for the action count now; it is filled in after
	 * the walk once the count is known.
	 */
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		/* cb->args[1] presumably accumulates the number of actions
		 * dumped by the walker — confirm against the walk callbacks.
		 */
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}
/* Per-namespace state: hashtable of per-device egress callback entries. */
struct tcf_action_net {
	struct rhashtable egdev_ht;	/* keyed by struct net_device pointer */
};

/* net_generic() id for struct tcf_action_net, set via pernet registration. */
static unsigned int tcf_action_net_id;
/* One registered egress-device callback: a (cb, cb_priv) pair. */
struct tcf_action_egdev_cb {
	struct list_head list;	/* node on tcf_action_egdev::cb_list */
	tc_setup_cb_t *cb;	/* invoked by tcf_action_egdev_cb_call() */
	void *cb_priv;		/* opaque argument handed back to cb */
};
/* Refcounted per-net_device entry holding registered egress callbacks. */
struct tcf_action_egdev {
	struct rhash_head ht_node;	/* membership in tcf_action_net::egdev_ht */
	const struct net_device *dev;	/* hashtable key */
	unsigned int refcnt;		/* one reference per registered callback */
	struct list_head cb_list;	/* list of tcf_action_egdev_cb */
};
/* Hash on the net_device pointer value itself (the 'dev' field). */
static const struct rhashtable_params tcf_action_egdev_ht_params = {
	.key_offset = offsetof(struct tcf_action_egdev, dev),
	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
	.key_len = sizeof(const struct net_device *),
};
  1091. static struct tcf_action_egdev *
  1092. tcf_action_egdev_lookup(const struct net_device *dev)
  1093. {
  1094. struct net *net = dev_net(dev);
  1095. struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
  1096. return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
  1097. tcf_action_egdev_ht_params);
  1098. }
  1099. static struct tcf_action_egdev *
  1100. tcf_action_egdev_get(const struct net_device *dev)
  1101. {
  1102. struct tcf_action_egdev *egdev;
  1103. struct tcf_action_net *tan;
  1104. egdev = tcf_action_egdev_lookup(dev);
  1105. if (egdev)
  1106. goto inc_ref;
  1107. egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
  1108. if (!egdev)
  1109. return NULL;
  1110. INIT_LIST_HEAD(&egdev->cb_list);
  1111. egdev->dev = dev;
  1112. tan = net_generic(dev_net(dev), tcf_action_net_id);
  1113. rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
  1114. tcf_action_egdev_ht_params);
  1115. inc_ref:
  1116. egdev->refcnt++;
  1117. return egdev;
  1118. }
  1119. static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
  1120. {
  1121. struct tcf_action_net *tan;
  1122. if (--egdev->refcnt)
  1123. return;
  1124. tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
  1125. rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
  1126. tcf_action_egdev_ht_params);
  1127. kfree(egdev);
  1128. }
  1129. static struct tcf_action_egdev_cb *
  1130. tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
  1131. tc_setup_cb_t *cb, void *cb_priv)
  1132. {
  1133. struct tcf_action_egdev_cb *egdev_cb;
  1134. list_for_each_entry(egdev_cb, &egdev->cb_list, list)
  1135. if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
  1136. return egdev_cb;
  1137. return NULL;
  1138. }
  1139. static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
  1140. enum tc_setup_type type,
  1141. void *type_data, bool err_stop)
  1142. {
  1143. struct tcf_action_egdev_cb *egdev_cb;
  1144. int ok_count = 0;
  1145. int err;
  1146. list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
  1147. err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
  1148. if (err) {
  1149. if (err_stop)
  1150. return err;
  1151. } else {
  1152. ok_count++;
  1153. }
  1154. }
  1155. return ok_count;
  1156. }
  1157. static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
  1158. tc_setup_cb_t *cb, void *cb_priv)
  1159. {
  1160. struct tcf_action_egdev_cb *egdev_cb;
  1161. egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
  1162. if (WARN_ON(egdev_cb))
  1163. return -EEXIST;
  1164. egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
  1165. if (!egdev_cb)
  1166. return -ENOMEM;
  1167. egdev_cb->cb = cb;
  1168. egdev_cb->cb_priv = cb_priv;
  1169. list_add(&egdev_cb->list, &egdev->cb_list);
  1170. return 0;
  1171. }
  1172. static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
  1173. tc_setup_cb_t *cb, void *cb_priv)
  1174. {
  1175. struct tcf_action_egdev_cb *egdev_cb;
  1176. egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
  1177. if (WARN_ON(!egdev_cb))
  1178. return;
  1179. list_del(&egdev_cb->list);
  1180. kfree(egdev_cb);
  1181. }
  1182. static int __tc_setup_cb_egdev_register(const struct net_device *dev,
  1183. tc_setup_cb_t *cb, void *cb_priv)
  1184. {
  1185. struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
  1186. int err;
  1187. if (!egdev)
  1188. return -ENOMEM;
  1189. err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
  1190. if (err)
  1191. goto err_cb_add;
  1192. return 0;
  1193. err_cb_add:
  1194. tcf_action_egdev_put(egdev);
  1195. return err;
  1196. }
  1197. int tc_setup_cb_egdev_register(const struct net_device *dev,
  1198. tc_setup_cb_t *cb, void *cb_priv)
  1199. {
  1200. int err;
  1201. rtnl_lock();
  1202. err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
  1203. rtnl_unlock();
  1204. return err;
  1205. }
  1206. EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
  1207. static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
  1208. tc_setup_cb_t *cb, void *cb_priv)
  1209. {
  1210. struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
  1211. if (WARN_ON(!egdev))
  1212. return;
  1213. tcf_action_egdev_cb_del(egdev, cb, cb_priv);
  1214. tcf_action_egdev_put(egdev);
  1215. }
  1216. void tc_setup_cb_egdev_unregister(const struct net_device *dev,
  1217. tc_setup_cb_t *cb, void *cb_priv)
  1218. {
  1219. rtnl_lock();
  1220. __tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
  1221. rtnl_unlock();
  1222. }
  1223. EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);
  1224. int tc_setup_cb_egdev_call(const struct net_device *dev,
  1225. enum tc_setup_type type, void *type_data,
  1226. bool err_stop)
  1227. {
  1228. struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
  1229. if (!egdev)
  1230. return 0;
  1231. return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
  1232. }
  1233. EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
  1234. static __net_init int tcf_action_net_init(struct net *net)
  1235. {
  1236. struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
  1237. return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
  1238. }
  1239. static void __net_exit tcf_action_net_exit(struct net *net)
  1240. {
  1241. struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
  1242. rhashtable_destroy(&tan->egdev_ht);
  1243. }
/* Pernet registration for the egdev hashtable state. */
static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,	/* filled in by register_pernet_subsys() */
	.size = sizeof(struct tcf_action_net),
	.async = true,
};
/* Subsystem init: register pernet state, then the rtnetlink handlers
 * for action add/delete/get (get also has a dump callback).
 */
static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}
subsys_initcall(tc_action_init);