/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */
static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register(unregister) new classifier type */
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
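
/* Queue work on the tc filter workqueue; it runs after an RCU grace period. */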
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);
	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			NL_SET_ERR_MSG(extack, "TC classifier not found");
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}
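
/* Release a classifier: call its ->destroy(), drop the module reference and
 * free the tcf_proto after an RCU grace period.
 */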
static void tcf_proto_destroy(struct tcf_proto *tp,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, extack);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	INIT_LIST_HEAD(&chain->filter_chain_list);
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain_head_change(struct tcf_chain *chain,
				  struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &chain->filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);

	tcf_chain_head_change(chain, NULL);
	while (tp) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_proto_destroy(tp, NULL);
		tp = rtnl_dereference(chain->filter_chain);
		tcf_chain_put(chain);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	list_del(&chain->list);
	kfree(chain);
	if (list_empty(&block->chain_list))
		kfree(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}
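
/* Look up a chain by index, taking a reference; optionally create it if it
 * does not exist yet.
 */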
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);
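
/* Hardware offload helpers: bind and unbind the block to/from a device
 * via ndo_setup_tc(TC_SETUP_BLOCK).
 */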
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	bo.extack = extack;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		return -EOPNOTSUPP;
	}

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	return err;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}

static int
tcf_chain_head_change_cb_add(struct tcf_chain *chain,
			     struct tcf_block_ext_info *ei,
			     struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;
	if (chain->filter_chain)
		tcf_chain_head_change_item(item, chain->filter_chain);
	list_add(&item->list, &chain->filter_chain_list);
	return 0;
}

static void
tcf_chain_head_change_cb_del(struct tcf_chain *chain,
			     struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &chain->filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

struct tcf_net {
	struct idr idr;
};

static unsigned int tcf_net_id;
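
/* Shared blocks are tracked in a per-netns IDR, keyed by block index. */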
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			     GFP_KERNEL);
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_remove(&tn->idr, block->index);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	struct tcf_chain *chain;
	int err;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);

	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
		err = -ENOMEM;
		goto err_chain_create;
	}
	block->refcnt = 1;
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;

err_chain_create:
	kfree(block);
	return ERR_PTR(err);
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */
static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_lookup(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;

		/* Find link */
		dev = __dev_get_by_index(net, ifindex);
		if (!dev)
			return ERR_PTR(-ENODEV);

		/* Find qdisc */
		if (!*parent) {
			*q = dev->qdisc;
			*parent = (*q)->handle;
		} else {
			*q = qdisc_lookup(dev, TC_H_MAJ(*parent));
			if (!*q) {
				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
				return ERR_PTR(-EINVAL);
			}
		}

		/* Is it classful? */
		cops = (*q)->ops->cl_ops;
		if (!cops) {
			NL_SET_ERR_MSG(extack, "Qdisc not classful");
			return ERR_PTR(-EINVAL);
		}

		if (!cops->tcf_block) {
			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Do we search for filter, attached to class? */
		if (TC_H_MIN(*parent)) {
			*cl = cops->find(*q, *parent);
			if (*cl == 0) {
				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
				return ERR_PTR(-ENOENT);
			}
		}

		/* And the last stroke */
		block = cops->tcf_block(*q, *cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);
		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	return block;
}
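
/* Chain 0 is created together with the block, so it is always the first entry
 * in the block's chain_list.
 */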
static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
{
	return list_first_entry(&block->chain_list, struct tcf_chain, list);
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum tcf_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	bool created = false;
	int err;

	if (ei->block_index) {
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_lookup(net, ei->block_index);
		if (block)
			block->refcnt++;
	}

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		created = true;
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
					   ei, extack);
	if (err)
		goto err_chain_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
err_chain_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
	if (created) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, net);
err_block_insert:
		kfree(tcf_block_chain_zero(block));
		kfree(block);
	} else {
		block->refcnt--;
	}
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	if (--block->refcnt == 0) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		/* Hold a refcnt for all chains, so that they don't disappear
		 * while we are iterating.
		 */
		list_for_each_entry(chain, &block->chain_list, list)
			tcf_chain_hold(chain);

		list_for_each_entry(chain, &block->chain_list, list)
			tcf_chain_flush(chain);
	}

	tcf_block_offload_unbind(block, q, ei);

	if (block->refcnt == 0) {
		/* At this point, all the chains should have refcnt >= 1. */
		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
			tcf_chain_put(chain);

		/* Finally, put chain 0 and allow block to be freed. */
		tcf_chain_put(tcf_block_chain_zero(block));
	}
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);
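
/* Replay (add == true) or remove all of the block's filters towards a single
 * hardware callback, using each classifier's ->reoffload() op.
 */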
static int
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain;
	struct tcf_proto *tp;
	int err;

	list_for_each_entry(chain, &block->chain_list, list) {
		for (tp = rtnl_dereference(chain->filter_chain); tp;
		     tp = rtnl_dereference(tp->next)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	struct tcf_block_cb *block_cb;
	int err;

	/* Replay any already present rules */
	err = tcf_block_playback_offloads(block, cb, cb_priv, true,
					  tcf_block_offload_in_use(block),
					  extack);
	if (err)
		return ERR_PTR(err);

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
					   extack);
	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
	tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
				    false, tcf_block_offload_in_use(block),
				    NULL);
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block, block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);

static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
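
/* Position of a tcf_proto within a chain, as located by tcf_chain_tp_find(). */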
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
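
/* Fill a netlink message describing one filter; a NULL fh results in a
 * handle-0 notification, used when a whole tcf_proto is removed or flushed.
 */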
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, 0, event, false);
}
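
/* RTM_NEWTFILTER handler: create a new filter or change an existing one. */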
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */
	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */
		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, chain, extack);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      extack);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp, NULL);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
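
/* RTM_DELTFILTER handler: delete a single filter, or flush a whole chain when
 * prio is zero.
 */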
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */
	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp, extack);
			err = 0;
		} else {
			NL_SET_ERR_MSG(extack, "Specified filter handle not found");
			err = -ENOENT;
		}
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 extack);
		if (err)
			goto errout;
		if (last) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tcf_proto_destroy(tp, extack);
		}
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	return err;
}
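
/* RTM_GETTFILTER handler: look up one filter and unicast it back to the requester. */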
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */
	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_lookup(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			err = -EMSGSIZE;
			break;
		}
	}

	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	ASSERT_RTNL();
	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions, &attr_size, extack);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
		exts->net = net;
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
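
/* Call the egress-device offload callbacks for every action that exposes a
 * device via ->get_dev().
 */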
static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev)
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts || ok_count)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);