act_api.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754
  1. /*
  2. * net/sched/act_api.c Packet action API.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Author: Jamal Hadi Salim
  10. *
  11. *
  12. */
  13. #include <linux/types.h>
  14. #include <linux/kernel.h>
  15. #include <linux/string.h>
  16. #include <linux/errno.h>
  17. #include <linux/slab.h>
  18. #include <linux/skbuff.h>
  19. #include <linux/init.h>
  20. #include <linux/kmod.h>
  21. #include <linux/err.h>
  22. #include <linux/module.h>
  23. #include <linux/rhashtable.h>
  24. #include <linux/list.h>
  25. #include <net/net_namespace.h>
  26. #include <net/sock.h>
  27. #include <net/sch_generic.h>
  28. #include <net/pkt_cls.h>
  29. #include <net/act_api.h>
  30. #include <net/netlink.h>
  31. static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
  32. {
  33. u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
  34. if (!tp)
  35. return -EINVAL;
  36. a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
  37. if (!a->goto_chain)
  38. return -ENOMEM;
  39. return 0;
  40. }
/* Drop the chain reference taken by tcf_action_goto_chain_init(). */
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put_by_act(a->goto_chain);
}
/* Point @res at the head filter of the action's target chain so the
 * caller restarts classification there.  The rcu_dereference_bh()
 * implies this runs inside an RCU-bh read-side section.
 */
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
  51. static void tcf_free_cookie_rcu(struct rcu_head *p)
  52. {
  53. struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
  54. kfree(cookie->data);
  55. kfree(cookie);
  56. }
/* Atomically publish @new_cookie (may be NULL to clear) in place of the
 * current cookie.  Readers access the cookie under RCU, so the previous
 * value is freed only after a grace period via tcf_free_cookie_rcu().
 */
static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	/* xchg() swaps the published pointer in one atomic step. */
	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later can not find us.
 */
/* Release everything attached to an action: per-CPU stats, the cookie
 * (cleared via tcf_set_action_cookie, which RCU-frees the old value),
 * the goto_chain reference, and finally the action itself.
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	/* Passing NULL drops the current cookie, if any. */
	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}
/* Full teardown of an action: type-specific cleanup hook first, then the
 * rate estimator, then the generic state via free_tcf().
 */
static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}
/* Drop one reference on @p (and one bind reference when @bind is true).
 * Returns 1 when this was the last reference — the action is removed
 * from the idr under idrinfo->lock and freed — otherwise 0.
 */
static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	/* Take the lock only when the refcount actually hits zero, so the
	 * idr removal and the final decrement are atomic w.r.t. lookups.
	 */
	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}
/* Release a reference on @p.  With @strict, refuse (-EPERM) to release an
 * action that still has classifiers bound to it.  Returns ACT_P_DELETED
 * when the action was destroyed, 0 otherwise, or a negative error.
 */
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through act API
	 * interface (classifiers always bind). Only case when action with
	 * positive reference count and zero bind count can exist is when it was
	 * also created with act API (unbinding last classifier will destroy the
	 * action if it was created by classifier). So only case when bind count
	 * can be changed after initial check is when unbound action is
	 * destroyed by act API while classifier binds to action with same id
	 * concurrently. This result either creation of new action(same behavior
	 * as before), or reusing existing action if concurrent process
	 * increments reference count before action is deleted. Both scenarios
	 * are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
/* Worst-case netlink size of the attributes every action dump shares
 * (kind, cookie, stats, nests), used to size reply skbs up front.
 */
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	/* The cookie is RCU-managed; sample its length under the lock. */
	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}
  146. static size_t tcf_action_full_attrs_size(size_t sz)
  147. {
  148. return NLMSG_HDRLEN /* struct nlmsghdr */
  149. + sizeof(struct tcamsg)
  150. + nla_total_size(0) /* TCA_ACT_TAB nested */
  151. + sz;
  152. }
  153. static size_t tcf_action_fill_size(const struct tc_action *act)
  154. {
  155. size_t sz = tcf_action_shared_attrs_size(act);
  156. if (act->ops->get_fill_size)
  157. return act->ops->get_fill_size(act) + sz;
  158. return sz;
  159. }
/* RTM_GETACTION dump: walk the per-netns action idr and emit one nested
 * attribute per action into @skb.  Resume state lives in cb->args[0]
 * (next index), cb->args[2] (dump flags) and cb->args[3] (time filter).
 * Returns the number of actions emitted in this pass.
 */
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0]; /* position reached by the previous dump pass */

	idr_for_each_entry_ul(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		/* Optional filter: skip actions not used since @jiffy_since. */
		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest) {
			index--; /* retry this entry on the next pass */
			goto nla_put_failure;
		}
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--; /* retry this entry on the next pass */
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		/* Without the large-dump flag, cap one batch at
		 * TCA_ACT_MAX_PRIO entries.
		 */
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1; /* where to resume next time */

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
/* Drop one reference while the caller already holds idrinfo->lock
 * ("unsafe" = no locking performed here).  Refuses with -EPERM while
 * classifiers are still bound; returns ACT_P_DELETED when the action
 * was removed and freed, 0 otherwise.
 */
static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}
/* RTM_DELACTION flush: delete every unbound action of @ops's kind and
 * record the kind plus the number deleted (TCA_FCNT) in @skb.  Returns
 * the number of actions deleted, or a negative error.
 */
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, id) {
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			/* Action still bound to a classifier: abort flush. */
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}
  255. int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
  256. struct netlink_callback *cb, int type,
  257. const struct tc_action_ops *ops,
  258. struct netlink_ext_ack *extack)
  259. {
  260. struct tcf_idrinfo *idrinfo = tn->idrinfo;
  261. if (type == RTM_DELACTION) {
  262. return tcf_del_walker(idrinfo, skb, ops);
  263. } else if (type == RTM_GETACTION) {
  264. return tcf_dump_walker(idrinfo, skb, cb);
  265. } else {
  266. WARN(1, "tcf_generic_walker: unknown command %d\n", type);
  267. NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
  268. return -EINVAL;
  269. }
  270. }
  271. EXPORT_SYMBOL(tcf_generic_walker);
  272. int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
  273. {
  274. struct tcf_idrinfo *idrinfo = tn->idrinfo;
  275. struct tc_action *p;
  276. mutex_lock(&idrinfo->lock);
  277. p = idr_find(&idrinfo->action_idr, index);
  278. if (IS_ERR(p))
  279. p = NULL;
  280. else if (p)
  281. refcount_inc(&p->tcfa_refcnt);
  282. mutex_unlock(&idrinfo->lock);
  283. if (p) {
  284. *a = p;
  285. return true;
  286. }
  287. return false;
  288. }
  289. EXPORT_SYMBOL(tcf_idr_search);
/* Delete the action at @index if nothing is bound to it.  Returns 0 on
 * success (or when another holder still has a reference), -ENOENT when
 * no such index exists, -EPERM while classifiers are bound.
 */
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			/* Last reference: unpublish while still locked. */
			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}
/* Allocate and initialize a new action of ops->size bytes for slot
 * @index.  Optionally allocates per-CPU stats (@cpustats) and a rate
 * estimator (@est).  On success stores the action in *@a with refcount 1
 * (plus one bind count when @bind) and returns 0; on failure unwinds all
 * allocations and returns -ENOMEM or the estimator error.
 */
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	*a = p;
	return 0;
/* Error unwind: free in reverse order of allocation. */
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);
/* Publish a fully initialized action in the slot previously reserved by
 * tcf_idr_check_alloc() (which left an ERR_PTR(-EBUSY) placeholder).
 */
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
/* Cleanup idr index that was allocated but not initialized. */
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
/* Check if action with specified index exists. If actions is found, increments
 * its reference and bind counters, and return 1. Otherwise insert temporary
 * error pointer (to prevent concurrent users from inserting actions with same
 * index) and return 0.
 */
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again; /* busy-wait for the other initializer */
		}

		if (p) {
			/* Existing action: take ref (and bind) counts. */
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			/* Reserve the requested index with an EBUSY marker
			 * until tcf_idr_insert() publishes the real action.
			 */
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		/* No index requested: allocate the lowest free one (>= 1). */
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
/* Netns teardown: release every remaining action of this kind and then
 * destroy the idr itself.  Bails out early if any action is still bound
 * (__tcf_idr_release returns an error in strict mode).
 */
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
/* Registry of all loaded action kinds, protected by act_mod_lock. */
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Register a new action kind and its pernet ops.  Rejects ops that lack
 * mandatory callbacks (-EINVAL) and duplicate type/kind registrations
 * (-EEXIST).  Returns 0 on success.
 */
int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			/* Duplicate kind: undo the pernet registration. */
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);
/* Remove an action kind from the registry and drop its pernet ops.
 * Returns -ENOENT if the ops were never registered.
 */
int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	/* Only unregister pernet ops for entries we actually removed. */
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
/* Find registered action ops matching C-string @kind and take a module
 * reference.  Returns NULL when not found — or when try_module_get()
 * fails (module being unloaded); the loop breaks either way.
 */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
/* lookup by nlattr */
/* Same as tc_lookup_action_n() but matches against a netlink string
 * attribute; also returns NULL when try_module_get() fails.
 */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
/* TCA_ACT_MAX_PRIO is 32, so jump offsets count up to 32 actions */
  535. #define TCA_ACT_MAX_PRIO_MASK 0x1FF
/* Run the @nr_actions actions in @actions against @skb in order.
 * Handles TC_ACT_REPEAT (re-run the same action), TC_ACT_JUMP (skip
 * jmp_prgcnt actions, with a ttl guarding against jump loops) and
 * TC_ACT_GOTO_CHAIN (record the target chain in @res).  Returns the
 * last non-PIPE action verdict.
 */
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		/* Still consuming a pending jump: skip this action. */
		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
/* Release every action in @actions (NULL-terminated, at most
 * TCA_ACT_MAX_PRIO entries), clearing each slot as it goes.  Strict
 * release: stops and returns the error if an action is still bound.
 */
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		/* Save ops before the release may free the action. */
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}
/* Destroy a single action by wrapping it in a one-element array. */
static int tcf_action_destroy_1(struct tc_action *a, int bind)
{
	struct tc_action *actions[] = { a, NULL };

	return tcf_action_destroy(actions, bind);
}
/* Drop one plain (non-bind) reference; returns 1 if that freed the action. */
static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}
  603. /* Put all actions in this array, skip those NULL's. */
  604. static void tcf_action_put_many(struct tc_action *actions[])
  605. {
  606. int i;
  607. for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
  608. struct tc_action *a = actions[i];
  609. const struct tc_action_ops *ops;
  610. if (!a)
  611. continue;
  612. ops = a->ops;
  613. if (tcf_action_put(a))
  614. module_put(ops->owner);
  615. }
  616. }
/* Legacy dump entry point: delegate entirely to the action's dump op. */
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}
/* Dump one action into @skb: kind, stats, cookie (under RCU) and the
 * kind-specific TCA_OPTIONS nest.  On any failure the skb is trimmed
 * back to its starting tail and -1 is returned.
 */
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb); /* rollback point */
	struct nlattr *nest;
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
/* Dump an array of actions, one nest per action keyed by its 1-based
 * order.  Returns 0 on success or a negative error with the failing
 * nest cancelled.
 */
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
  678. static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
  679. {
  680. struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
  681. if (!c)
  682. return NULL;
  683. c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
  684. if (!c->data) {
  685. kfree(c);
  686. return NULL;
  687. }
  688. c->len = nla_len(tb[TCA_ACT_COOKIE]);
  689. return c;
  690. }
  691. static bool tcf_action_valid(int action)
  692. {
  693. int opcode = TC_ACT_EXT_OPCODE(action);
  694. if (!opcode)
  695. return action <= TC_ACT_VALUE_MAX;
  696. return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
  697. }
/* Create (or bind to) one action from netlink attributes.  When @name is
 * NULL the kind and options come from nested attributes in @nla;
 * otherwise @name selects the kind and @nla holds the options directly
 * (backward compatibility for the policer).  May drop/re-take RTNL to
 * load the action module, in which case -EAGAIN tells the caller to
 * replay the request.  Returns the action or an ERR_PTR.
 */
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			goto err_out;
		}
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			goto err_out;
		}
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE) {
				NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
				goto err_out;
			}

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			err = -EINVAL;
			goto err_out;
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, extack);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				extack);
	if (err < 0)
		goto err_mod;

	/* Cookie ownership moves to the action here. */
	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			tcf_action_destroy_1(a, bind);
			NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
			return ERR_PTR(err);
		}
	}

	if (!tcf_action_valid(a->tcfa_action)) {
		tcf_action_destroy_1(a, bind);
		NL_SET_ERR_MSG(extack, "Invalid control action value");
		return ERR_PTR(-EINVAL);
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	/* Cookie was never handed to an action: free it here. */
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}
/* Returns numbers of initialized actions or negative error. */
/* Initialize up to TCA_ACT_MAX_PRIO actions from the nested attribute
 * @nla and store them, starting at index 0, in @actions[].
 *
 * On failure, every action initialized so far is destroyed (honouring
 * @bind), so the caller never has to clean up a partially filled array.
 */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
	if (err < 0)
		return err;

	/* Netlink action priorities are 1-based; tb[0] is unused. */
	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					rtnl_held, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	/* Estimate of the netlink attribute space a later dump will need. */
	*attr_size = tcf_action_full_attrs_size(sz);
	return i - 1;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
/* Dump the statistics of action @p (basic, hw, rate estimator, queue)
 * into @skb.
 *
 * @compat_mode: when true, emit the legacy TCA_STATS/TCA_XSTATS TLVs;
 * only TCA_OLD_COMPAT actions support that, anything else is silently
 * skipped (returns 0).
 *
 * Returns 0 on success, -1 on failure (no skb space or a copy failed).
 */
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	/* NOTE(review): cpu_bstats/cpu_qstats may be NULL here; presumably
	 * the gnet copy helpers then use the non-percpu fields — confirm.
	 */
	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
/* Fill @skb with a tcamsg header plus a TCA_ACT_TAB nest containing the
 * dump of @actions.
 *
 * Returns the resulting skb length on success.  On failure the message
 * is trimmed back to the original tail (@b) and -1 is returned, leaving
 * the skb exactly as it was on entry.
 */
static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	/* Patch the final message length now that all attributes are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}
  906. static int
  907. tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
  908. struct tc_action *actions[], int event,
  909. struct netlink_ext_ack *extack)
  910. {
  911. struct sk_buff *skb;
  912. skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
  913. if (!skb)
  914. return -ENOBUFS;
  915. if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
  916. 0, 1) <= 0) {
  917. NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
  918. kfree_skb(skb);
  919. return -EINVAL;
  920. }
  921. return rtnl_unicast(skb, net, portid);
  922. }
/* Look up one existing action by kind and index from the nested
 * attribute @nla (a single TCA_ACT_* entry of a get/delete request).
 *
 * On success, returns the action with a reference held; the module
 * reference taken by tc_lookup_action() is dropped before returning.
 * Returns an ERR_PTR() on failure.
 */
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	/* ->lookup() returns non-zero and sets @a when the index exists. */
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	/* NOTE(review): presumably the found action pins the module on its
	 * own, so our lookup reference can be dropped here — confirm.
	 */
	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
/* Flush (delete) all actions of a single kind, then send userspace a
 * RTM_DELACTION notification flagged NLM_F_ROOT.
 *
 * Returns 0 on success or a negative error.
 */
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);	/* takes a module reference */
	if (!ops) { /*some idjot trying to flush unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	/* NOTE(review): @dcb is passed to ->walk() uninitialized; it looks
	 * like walkers use it only as scratch callback state here — confirm.
	 */
	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}
/* Drop the caller's references on @actions and delete each action from
 * its idr.  Array entries are cleared as they are processed, so on an
 * error return the remaining entries are still the caller's to handle.
 *
 * Returns 0 on success or the first negative error from the idr delete.
 */
static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
/* Delete @actions and, on success, broadcast a RTM_DELACTION message.
 * The notification skb is filled *before* the delete so that the dump
 * can still see the actions being removed.
 *
 * Returns 0 on success or a negative error.
 */
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	/* @attr_size is only an estimate; never allocate below GOODSIZE. */
	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}
/* Handle RTM_GETACTION and RTM_DELACTION for the batch of actions in @nla.
 *
 * A delete with NLM_F_ROOT set means "flush everything of this kind"
 * and is dispatched to tca_action_flush().  Otherwise every tb[1..] entry
 * is looked up by kind+index, and the batch is either dumped back to the
 * requester (get) or deleted with notification (delete).
 *
 * Returns 0 on success or a negative error.
 */
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	/* Get (and any error path) drops the references taken above; a
	 * successful delete already consumed them in tcf_action_delete().
	 */
	tcf_action_put_many(actions);
	return ret;
}
  1120. static int
  1121. tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
  1122. u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
  1123. {
  1124. struct sk_buff *skb;
  1125. int err = 0;
  1126. skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
  1127. GFP_KERNEL);
  1128. if (!skb)
  1129. return -ENOBUFS;
  1130. if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
  1131. RTM_NEWACTION, 0, 0) <= 0) {
  1132. NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
  1133. kfree_skb(skb);
  1134. return -EINVAL;
  1135. }
  1136. err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
  1137. n->nlmsg_flags & NLM_F_ECHO);
  1138. if (err > 0)
  1139. err = 0;
  1140. return err;
  1141. }
/* Create (or replace, when @ovr) actions from @nla and broadcast a
 * RTM_NEWACTION notification.
 *
 * Returns 0 on success or a negative error; -EAGAIN asks the caller to
 * replay the request after a module load.
 */
static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int ret = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
			      &attr_size, true, extack);
	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
	/* NOTE(review): only the replace path drops references here; on
	 * create they are presumably owned by the idr — confirm.
	 */
	if (ovr)
		tcf_action_put_many(actions);

	return ret;
}
/* Only the large-dump flag may be set by userspace in TCA_ROOT_FLAGS. */
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;

/* Validation policy for the root-level attributes of action requests. */
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
			     .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
/* Top-level rtnetlink handler for RTM_NEWACTION / RTM_DELACTION /
 * RTM_GETACTION.
 *
 * Everything except get requires CAP_NET_ADMIN.  A new-action request
 * failing with -EAGAIN (module just loaded) is replayed.
 */
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist
		 * Note that CREATE | EXCL implies that
		 * but since we want avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}
  1212. static struct nlattr *find_dump_kind(struct nlattr **nla)
  1213. {
  1214. struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
  1215. struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
  1216. struct nlattr *kind;
  1217. tb1 = nla[TCA_ACT_TAB];
  1218. if (tb1 == NULL)
  1219. return NULL;
  1220. if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
  1221. NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
  1222. return NULL;
  1223. if (tb[1] == NULL)
  1224. return NULL;
  1225. if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
  1226. return NULL;
  1227. kind = tb2[TCA_ACT_KIND];
  1228. return kind;
  1229. }
/* Netlink dump handler for RTM_GETACTION.  Dumps all actions of one kind
 * (taken from the request's first action entry) via that kind's ->walk().
 *
 * cb->args[] usage visible here: [1] act count written by the walker,
 * [2] dump flags (bitfield32 value from TCA_ROOT_FLAGS), [3] jiffies
 * value derived from TCA_ROOT_TIME_DELTA.
 */
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
			  tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);	/* holds a module reference */
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	/* NOTE(review): presumably the walker treats cb->args[3] as a
	 * "modified since" cutoff in jiffies — confirm against walkers.
	 */
	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;

	/* Reserve space for the count now; value is patched in after the
	 * walk, once the walker has accumulated it in cb->args[1].
	 */
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}
/* Per-netns state: hash table of per-device egress-offload callbacks. */
struct tcf_action_net {
	struct rhashtable egdev_ht;
};

static unsigned int tcf_action_net_id;

/* One registered (cb, cb_priv) pair for an egress device. */
struct tcf_action_egdev_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_priv;
};

/* Per-device entry in egdev_ht.  refcnt is a plain unsigned int because
 * all register/unregister paths are serialized under RTNL.
 */
struct tcf_action_egdev {
	struct rhash_head ht_node;
	const struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

/* Table is keyed by the net_device pointer *value*, not its contents. */
static const struct rhashtable_params tcf_action_egdev_ht_params = {
	.key_offset = offsetof(struct tcf_action_egdev, dev),
	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
	.key_len = sizeof(const struct net_device *),
};
  1324. static struct tcf_action_egdev *
  1325. tcf_action_egdev_lookup(const struct net_device *dev)
  1326. {
  1327. struct net *net = dev_net(dev);
  1328. struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
  1329. return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
  1330. tcf_action_egdev_ht_params);
  1331. }
  1332. static struct tcf_action_egdev *
  1333. tcf_action_egdev_get(const struct net_device *dev)
  1334. {
  1335. struct tcf_action_egdev *egdev;
  1336. struct tcf_action_net *tan;
  1337. egdev = tcf_action_egdev_lookup(dev);
  1338. if (egdev)
  1339. goto inc_ref;
  1340. egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
  1341. if (!egdev)
  1342. return NULL;
  1343. INIT_LIST_HEAD(&egdev->cb_list);
  1344. egdev->dev = dev;
  1345. tan = net_generic(dev_net(dev), tcf_action_net_id);
  1346. rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
  1347. tcf_action_egdev_ht_params);
  1348. inc_ref:
  1349. egdev->refcnt++;
  1350. return egdev;
  1351. }
/* Drop one reference on @egdev; on the last one remove it from the
 * per-netns hash table and free it.  Caller must hold RTNL.
 */
static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
{
	struct tcf_action_net *tan;

	if (--egdev->refcnt)
		return;
	tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
	rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);
	kfree(egdev);
}
  1362. static struct tcf_action_egdev_cb *
  1363. tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
  1364. tc_setup_cb_t *cb, void *cb_priv)
  1365. {
  1366. struct tcf_action_egdev_cb *egdev_cb;
  1367. list_for_each_entry(egdev_cb, &egdev->cb_list, list)
  1368. if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
  1369. return egdev_cb;
  1370. return NULL;
  1371. }
  1372. static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
  1373. enum tc_setup_type type,
  1374. void *type_data, bool err_stop)
  1375. {
  1376. struct tcf_action_egdev_cb *egdev_cb;
  1377. int ok_count = 0;
  1378. int err;
  1379. list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
  1380. err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
  1381. if (err) {
  1382. if (err_stop)
  1383. return err;
  1384. } else {
  1385. ok_count++;
  1386. }
  1387. }
  1388. return ok_count;
  1389. }
  1390. static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
  1391. tc_setup_cb_t *cb, void *cb_priv)
  1392. {
  1393. struct tcf_action_egdev_cb *egdev_cb;
  1394. egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
  1395. if (WARN_ON(egdev_cb))
  1396. return -EEXIST;
  1397. egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
  1398. if (!egdev_cb)
  1399. return -ENOMEM;
  1400. egdev_cb->cb = cb;
  1401. egdev_cb->cb_priv = cb_priv;
  1402. list_add(&egdev_cb->list, &egdev->cb_list);
  1403. return 0;
  1404. }
  1405. static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
  1406. tc_setup_cb_t *cb, void *cb_priv)
  1407. {
  1408. struct tcf_action_egdev_cb *egdev_cb;
  1409. egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
  1410. if (WARN_ON(!egdev_cb))
  1411. return;
  1412. list_del(&egdev_cb->list);
  1413. kfree(egdev_cb);
  1414. }
  1415. static int __tc_setup_cb_egdev_register(const struct net_device *dev,
  1416. tc_setup_cb_t *cb, void *cb_priv)
  1417. {
  1418. struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
  1419. int err;
  1420. if (!egdev)
  1421. return -ENOMEM;
  1422. err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
  1423. if (err)
  1424. goto err_cb_add;
  1425. return 0;
  1426. err_cb_add:
  1427. tcf_action_egdev_put(egdev);
  1428. return err;
  1429. }
/* Public entry point: register an egress-device offload callback.
 * Takes RTNL to serialize against other register/unregister calls.
 */
int tc_setup_cb_egdev_register(const struct net_device *dev,
			       tc_setup_cb_t *cb, void *cb_priv)
{
	int err;

	rtnl_lock();
	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
/* RTNL-held helper: remove (cb, cb_priv) from @dev's entry and drop the
 * reference the matching register took.  Unregistering a device that
 * has no entry is a caller bug (WARN, then no-op).
 */
static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
					   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (WARN_ON(!egdev))
		return;

	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
	tcf_action_egdev_put(egdev);
}
/* Public entry point: unregister an egress-device offload callback.
 * Takes RTNL to serialize against other register/unregister calls.
 */
void tc_setup_cb_egdev_unregister(const struct net_device *dev,
				  tc_setup_cb_t *cb, void *cb_priv)
{
	rtnl_lock();
	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);
  1457. int tc_setup_cb_egdev_call(const struct net_device *dev,
  1458. enum tc_setup_type type, void *type_data,
  1459. bool err_stop)
  1460. {
  1461. struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
  1462. if (!egdev)
  1463. return 0;
  1464. return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
  1465. }
  1466. EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
/* Per-netns init: set up the egress-device callback hash table. */
static __net_init int tcf_action_net_init(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}
/* Per-netns teardown: destroy the egress-device callback hash table. */
static void __net_exit tcf_action_net_exit(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	rhashtable_destroy(&tan->egdev_ht);
}
/* Per-netns registration; .size makes the core allocate tcf_action_net
 * for each namespace, reachable via net_generic(net, tcf_action_net_id).
 */
static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,
	.size = sizeof(struct tcf_action_net),
};
/* Subsystem init: register per-netns state and the RTM_*ACTION
 * rtnetlink handlers.
 */
static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	/* NOTE(review): rtnl_register() return values are ignored here,
	 * matching the era's convention for rtnetlink handlers — confirm
	 * this is still acceptable.
	 */
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);