/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */
static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
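
/* Illustrative sketch (not part of this file): a classifier module, here a
 * hypothetical cls_foo, would typically register and unregister its ops from
 * its module init/exit hooks:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 *	module_init(cls_foo_init);
 *	module_exit(cls_foo_exit);
 *
 * The foo_* callbacks are hypothetical names used only for the example.
 */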

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
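
/* Queue deferred work (typically filter destruction) on the ordered tc filter
 * workqueue; unregister_tcf_proto_ops() flushes this queue so a classifier
 * module cannot go away while such work is still pending.
 */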
bool tcf_queue_work(struct work_struct *work)
{
	return queue_work(tc_filter_wq, work);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);
	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		/* The bail-out must sit outside the #ifdef so that a failed
		 * lookup also errors out when CONFIG_MODULES is not set.
		 */
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_head_change(struct tcf_chain *chain,
				  struct tcf_proto *tp_head)
{
	if (chain->chain_head_change)
		chain->chain_head_change(tp_head,
					 chain->chain_head_change_priv);
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	tcf_chain_head_change(chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);
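
/* Propagate block bind/unbind to the underlying device through
 * ndo_setup_tc(TC_SETUP_BLOCK), so hardware-offload capable drivers can
 * track which block a qdisc's filters live on.
 */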

static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  enum tc_block_command command)
{
	struct net_device *dev = q->dev_queue->dev;
	struct tc_block_offload bo = {};

	if (!dev->netdev_ops->ndo_setup_tc)
		return;
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				   struct tcf_block_ext_info *ei)
{
	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);

	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	WARN_ON(!ei->chain_head_change);
	chain->chain_head_change = ei->chain_head_change;
	chain->chain_head_change_priv = ei->chain_head_change_priv;
	block->net = qdisc_net(q);
	block->q = q;
	tcf_block_offload_bind(block, q, ei);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei);
}
EXPORT_SYMBOL(tcf_block_get);
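
/* Illustrative sketch (not part of this file): a simple classful qdisc with a
 * single filter list would typically acquire and release its block like so:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch);	// in ->init()
 *	...
 *	tcf_block_put(q->block);				// in ->destroy()
 *
 * where q->filter_list is a struct tcf_proto __rcu * head that
 * tcf_chain_head_change_dflt() keeps pointing at chain 0's first filter.
 */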

static void tcf_block_put_final(struct work_struct *work)
{
	struct tcf_block *block = container_of(work, struct tcf_block, work);
	struct tcf_chain *chain, *tmp;

	rtnl_lock();
	/* Only chain 0 should be still here. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	rtnl_unlock();
	kfree(block);
}

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing. However, filters are now
 * destroyed in tc filter workqueue with RTNL lock, they can not race here.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_flush(chain);

	tcf_block_offload_unbind(block, q, ei);

	INIT_WORK(&block->work, tcf_block_put_final);
	/* Wait for existing RCU callbacks to cool down, make sure their works
	 * have been queued before this. We can not flush pending works here
	 * because we are holding the RTNL lock.
	 */
	rcu_barrier();
	tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
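
/* Per-block callbacks: a driver registers a tc_setup_cb_t on a block so that
 * filters added to that block can be replayed to it for offloading. Each
 * callback is identified by the (cb, cb_ident) pair and reference counted.
 */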

struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return NULL;
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
	return block_cb ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);

static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
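
/* Cursor into a chain's singly linked filter list: pprev points at the link
 * leading to the filter of interest, next at the filter following it. It is
 * filled in by tcf_chain_tp_find() and consumed by the insert/remove helpers
 * below.
 */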
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
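
/* Build a netlink message describing a single filter (or only the tp itself
 * when fh is NULL); used for notifications and dump replies.
 */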
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct Qdisc *q, u32 parent,
			 void *fh, u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = parent;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct Qdisc *q, u32 parent,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct Qdisc *q, u32 parent,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct Qdisc *q, u32 parent,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false);
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, q, parent,
						 fh, false, &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, q, parent, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, q, parent, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	parent = tcm->tcm_parent;
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	}
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}
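
/* tcf_exts helpers: manage the array of actions ("extensions") attached to a
 * filter - validation from netlink attributes, replacement, dumping and
 * teardown.
 */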
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	ASSERT_RTNL();
	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
		exts->net = net;
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
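
/* Replay a setup call to the "egress device" callbacks of every action in
 * exts that exposes a target device via ->get_dev() (e.g. mirred), so that
 * redirect targets also get a chance to offload the filter.
 */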
static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev)
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

static int __init tc_filter_init(void)
{
	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}
subsys_initcall(tc_filter_init);