/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
/* Find classifier type by string name */
static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
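
/*
 * A classifier module typically registers its ops from module_init() and
 * unregisters them from module_exit(). A minimal sketch, modeled on in-tree
 * classifiers such as cls_basic (the foo_* names are illustrative):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit foo_exit_module(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */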
  69. static struct workqueue_struct *tc_filter_wq;
  70. int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
  71. {
  72. struct tcf_proto_ops *t;
  73. int rc = -ENOENT;
  74. /* Wait for outstanding call_rcu()s, if any, from a
  75. * tcf_proto_ops's destroy() handler.
  76. */
  77. rcu_barrier();
  78. flush_workqueue(tc_filter_wq);
  79. write_lock(&cls_mod_lock);
  80. list_for_each_entry(t, &tcf_proto_base, head) {
  81. if (t == ops) {
  82. list_del(&t->head);
  83. rc = 0;
  84. break;
  85. }
  86. }
  87. write_unlock(&cls_mod_lock);
  88. return rc;
  89. }
  90. EXPORT_SYMBOL(unregister_tcf_proto_ops);
  91. bool tcf_queue_work(struct work_struct *work)
  92. {
  93. return queue_work(tc_filter_wq, work);
  94. }
  95. EXPORT_SYMBOL(tcf_queue_work);
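
/*
 * Classifiers that tear down their internal state from an RCU callback defer
 * the actual work to tc_filter_wq via tcf_queue_work(), so that
 * unregister_tcf_proto_ops() can drain everything with the rcu_barrier() +
 * flush_workqueue() pair above before the classifier module goes away.
 */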

/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
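
/*
 * Worked example: with an empty chain the first auto-allocated prio is
 * TC_H_MAJ(0xC0000000), which userspace sees as pref 49152. With that filter
 * already at the head, the next allocation becomes TC_H_MAJ(0xC0000000 - 1)
 * = 0xBFFF0000, i.e. pref 49151, and so on counting downwards, so each new
 * auto-prio filter gets a numerically lower (higher-precedence) priority.
 */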

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);
	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	if (chain->p_filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}
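
/*
 * Look up a chain by index within a block and take a reference on it. When
 * @create is true and the chain does not exist yet, a new chain is created
 * with an initial refcnt of 1. Callers drop the reference with
 * tcf_chain_put(), which frees the chain once the last user is gone.
 */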
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}
	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
			       struct tcf_proto __rcu **p_filter_chain)
{
	chain->p_filter_chain = p_filter_chain;
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get);
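
/*
 * Qdiscs that support filters pair tcf_block_get()/tcf_block_put() in their
 * init/destroy paths. A minimal sketch, following the pattern of in-tree
 * qdiscs such as ingress (the foo_* names and the q->block / q->filter_list
 * members are illustrative):
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */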

static void tcf_block_put_final(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain, *tmp;

	/* At this point, all the chains should have refcnt == 1. */
	rtnl_lock();
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	rtnl_unlock();
	kfree(block);
}

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing. However, filters are destroyed
 * in RCU callbacks, we have to hold the chains first, otherwise we would
 * always race with RCU callbacks on this list without proper locking.
 */
static void tcf_block_put_deferred(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain;

	rtnl_lock();
	/* Hold a refcnt for all chains, except 0, in case they are gone. */
	list_for_each_entry(chain, &block->chain_list, list)
		if (chain->index)
			tcf_chain_hold(chain);

	/* No race on the list, because no chain could be destroyed. */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);

	INIT_WORK(&block->work, tcf_block_put_final);
	/* Wait for RCU callbacks to release the reference count and make
	 * sure their works have been queued before this.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
	rtnl_unlock();
}

void tcf_block_put(struct tcf_block *block)
{
	if (!block)
		return;

	INIT_WORK(&block->work, tcf_block_put_deferred);
	/* Wait for existing RCU callbacks to cool down, make sure their works
	 * have been queued before this. We can not flush pending works here
	 * because we are holding the RTNL lock.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put);

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
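
/*
 * Qdiscs call tcf_classify() from their classify/enqueue path. A minimal
 * sketch of the caller side (q->filter_list is illustrative and the exact
 * handling of the TC_ACT_* return codes varies per qdisc):
 *
 *	struct tcf_result res;
 *	int result;
 *
 *	result = tcf_classify(skb, rcu_dereference_bh(q->filter_list),
 *			      &res, false);
 *	if (result == TC_ACT_SHOT)
 *		drop the skb;
 *	else if (result == TC_ACT_UNSPEC)
 *		no filter matched, fall back to the qdisc default;
 *	else
 *		use res.classid / res.class to select the target class;
 */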

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}
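
/*
 * The per-chain filter list is kept sorted by ascending prio. The insert and
 * remove helpers below splice @tp in or out at the position recorded in
 * @chain_info by tcf_chain_tp_find(), keep chain->p_filter_chain (the qdisc's
 * fast-path head pointer) in sync when the list head changes, and adjust the
 * chain refcount so the chain stays alive while it holds filters.
 */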
static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (chain->p_filter_chain &&
	    *chain_info->pprev == chain->filter_chain)
		rcu_assign_pointer(*chain->p_filter_chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (chain->p_filter_chain && tp == chain->filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, void *fh, u32 portid,
			 u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, 0, event, false);
}
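
/*
 * tc_ctl_tfilter() below implements the RTM_NEWTFILTER / RTM_DELTFILTER /
 * RTM_GETTFILTER control path. Roughly: parse the tcmsg and attributes, look
 * up the device, qdisc, class and tcf_block, grab (or create) the requested
 * chain, locate the tcf_proto by protocol/prio (creating it for NEWTFILTER
 * with NLM_F_CREATE), then dispatch to the classifier's get/change/delete
 * ops and notify listeners over rtnetlink. -EAGAIN from tcf_proto_create()
 * (module autoload had to drop RTNL) causes a full replay of the request.
 */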
/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, fh, false,
						 &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
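
/*
 * From userspace this handler is exercised by tc(8), for example
 * (device and classid values are illustrative):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *		u32 match ip dst 10.0.0.1/32 flowid 1:1
 *	tc filter del dev eth0 parent 1: prio 10
 *	tc filter show dev eth0 parent 1:
 */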

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
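
/*
 * Classifiers typically use these helpers from their ->change() callback:
 * parse the action attributes into a temporary tcf_exts with
 * tcf_exts_validate(), then commit them with tcf_exts_change(). A minimal
 * sketch (the foo_* attribute names and the filter struct are illustrative):
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&f->exts, &e);
 */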

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
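
/*
 * The two dump helpers above are paired in a classifier's ->dump() callback:
 * tcf_exts_dump() emits the configured actions inside the options nest, and
 * tcf_exts_dump_stats() adds their statistics after the nest is closed.
 */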

int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (a->ops->get_dev) {
			a->ops->get_dev(a, dev_net(dev), hw_dev);
			break;
		}
	}
	if (*hw_dev)
		return 0;
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);

static int __init tc_filter_init(void)
{
	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);