
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */
static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	rtnl_unlock();
	request_module("cls_%s", kind);
	rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
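
/* Usage sketch (illustrative, not part of this file): a classifier module
 * registers a static tcf_proto_ops on load and unregisters it on unload.
 * The "foo" names below are hypothetical:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */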
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
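
/* Worked example: the priority lives in the major half of the 32-bit
 * handle. With no existing filter the default above yields 0xC0000000.
 * If the current head has prio 0x80000000, then 0x80000000 - 1 =
 * 0x7fffffff and TC_H_MAJ() masks that down to 0x7fff0000, i.e. the new
 * filter sorts just before the old head.
 */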
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);
	tp->ops = tcf_proto_lookup_ops(kind, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, extack);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}
struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;
	kfree(chain);
	if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
		kfree_rcu(block, rcu);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = tcf_chain_lookup(block, chain_index);

	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			return NULL;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			return NULL;
	}

	if (by_act)
		++chain->action_refcnt;

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (chain->refcnt - chain->action_refcnt == 1 && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
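
/* Reference-counting note: actions that point to a chain (e.g. a goto_chain
 * action) take their reference through tcf_chain_get_by_act() and drop it
 * with tcf_chain_put_by_act() below. Such action-only references keep the
 * chain alive but, per tcf_chain_held_by_acts_only() above, do not make it
 * visible to the user until a regular (filter) reference appears.
 */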
static void tc_chain_tmplt_del(struct tcf_chain *chain);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act)
{
	if (by_act)
		chain->action_refcnt--;
	chain->refcnt--;

	/* The last dropped non-action reference will trigger notification. */
	if (chain->refcnt - chain->action_refcnt == 0 && !by_act)
		tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false);

	if (chain->refcnt == 0) {
		tc_chain_tmplt_del(chain);
		tcf_chain_destroy(chain);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	if (chain->explicitly_created)
		tcf_chain_put(chain);
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);

	tcf_chain0_head_change(chain, NULL);
	while (tp) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_proto_destroy(tp, NULL);
		tp = rtnl_dereference(chain->filter_chain);
		tcf_chain_put(chain);
	}
}
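
/* Note on the tcf_chain_put() in the loop above: every tcf_proto inserted
 * into a chain holds one chain reference (taken in tcf_chain_tp_insert()
 * further below), so each destroyed tp must drop exactly one reference
 * here for the counts to balance.
 */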
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	bo.extack = extack;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		return -EOPNOTSUPP;
	}

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	return err;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain0 = block->chain0.chain;
	struct tcf_filter_chain_list_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;
	if (chain0 && chain0->filter_chain)
		tcf_chain_head_change_item(item, chain0->filter_chain);
	list_add(&item->list, &block->chain0.filter_chain_list);
	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain0 = block->chain0.chain;
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (chain0)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
static void tcf_block_flush_all_chains(struct tcf_block *block)
{
	struct tcf_chain *chain;

	/* Hold a refcnt for all chains, so that they don't disappear
	 * while we are iterating.
	 */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_hold(chain);

	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);
}

static void tcf_block_put_all_chains(struct tcf_block *block)
{
	struct tcf_chain *chain, *tmp;

	/* At this point, all the chains should have refcnt >= 1. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_put(chain);
	}
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei)
{
	if (refcount_dec_and_test(&block->refcnt)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);
		if (!free_block)
			tcf_block_flush_all_chains(block);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			kfree_rcu(block, rcu);
		else
			tcf_block_put_all_chains(block);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block)
{
	__tcf_block_put(block, NULL, NULL);
}
/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */
static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;

		rcu_read_lock();

		/* Find link */
		dev = dev_get_by_index_rcu(net, ifindex);
		if (!dev) {
			rcu_read_unlock();
			return ERR_PTR(-ENODEV);
		}

		/* Find qdisc */
		if (!*parent) {
			*q = dev->qdisc;
			*parent = (*q)->handle;
		} else {
			*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
			if (!*q) {
				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
				err = -EINVAL;
				goto errout_rcu;
			}
		}

		*q = qdisc_refcount_inc_nz(*q);
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}

		/* Is it classful? */
		cops = (*q)->ops->cl_ops;
		if (!cops) {
			NL_SET_ERR_MSG(extack, "Qdisc not classful");
			err = -EINVAL;
			goto errout_rcu;
		}

		if (!cops->tcf_block) {
			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
			err = -EOPNOTSUPP;
			goto errout_rcu;
		}

		/* At this point we know that qdisc is not noop_qdisc,
		 * which means that qdisc holds a reference to net_device
		 * and we hold a reference to qdisc, so it is safe to release
		 * rcu read lock.
		 */
		rcu_read_unlock();

		/* Do we search for filter, attached to class? */
		if (TC_H_MIN(*parent)) {
			*cl = cops->find(*q, *parent);
			if (*cl == 0) {
				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
				err = -ENOENT;
				goto errout_qdisc;
			}
		}

		/* And the last stroke */
		block = cops->tcf_block(*q, *cl, extack);
		if (!block) {
			err = -EINVAL;
			goto errout_qdisc;
		}
		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			err = -EOPNOTSUPP;
			goto errout_qdisc;
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;

errout_rcu:
	rcu_read_unlock();
errout_qdisc:
	if (*q) {
		qdisc_put(*q);
		*q = NULL;
	}
	return ERR_PTR(err);
}
static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block);

	if (q)
		qdisc_put(q);
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum tcf_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
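
/* Usage sketch (illustrative, not part of this file): a qdisc that supports
 * filters typically calls tcf_block_get() from its ->init() with a pointer
 * to its own filter list head, and tcf_block_put() from its ->destroy().
 * The "foo" qdisc and field names below are hypothetical:
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */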
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);
static int
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain;
	struct tcf_proto *tp;
	int err;

	list_for_each_entry(chain, &block->chain_list, list) {
		for (tp = rtnl_dereference(chain->filter_chain); tp;
		     tp = rtnl_dereference(tp->next)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	struct tcf_block_cb *block_cb;
	int err;

	/* Replay any already present rules */
	err = tcf_block_playback_offloads(block, cb, cb_priv, true,
					  tcf_block_offload_in_use(block),
					  extack);
	if (err)
		return ERR_PTR(err);

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
					   extack);
	return PTR_ERR_OR_ZERO(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
	tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
				    false, tcf_block_offload_in_use(block),
				    NULL);
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block, block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);
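
/* Usage sketch (illustrative): an offloading driver typically registers a
 * block callback from its ndo_setup_tc() handler when a block is bound and
 * unregisters it on unbind. The foo_setup_cb/priv names are hypothetical:
 *
 *	case TC_SETUP_BLOCK: {
 *		struct tc_block_offload *f = type_data;
 *
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block, foo_setup_cb,
 *						     priv, priv, f->extack);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block, foo_setup_cb, priv);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */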
static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
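
/* Note on the reclassify loop above: TC_ACT_RECLASSIFY restarts matching
 * from the original head of the chain (e.g. after an action rewrote the
 * packet), while goto_chain restarts from the head of the target chain.
 * Either way, at most max_reclassify_loop (4) restarts are allowed before
 * the packet is dropped with TC_ACT_SHOT to guard against rule loops.
 */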
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
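
/* The filter chain is kept sorted by ascending prio, so the walk above
 * stops at the first tp with tp->prio >= prio: either an exact match is
 * returned, or tp is NULL and chain_info->pprev records the insertion
 * point that tcf_chain_tp_insert() will splice a new tp into.
 */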
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, chain, extack);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      extack);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp, NULL);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	tcf_block_release(q, block);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp, extack);
			err = 0;
		} else {
			NL_SET_ERR_MSG(extack, "Specified filter handle not found");
			err = -ENOENT;
		}
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 extack);
		if (err)
			goto errout;
		if (last) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tcf_proto_destroy(tp, extack);
		}
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	tcf_block_release(q, block);
	return err;
}
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	tcf_block_release(q, block);
	return err;
}
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

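/* Dump one chain's filters into skb. Returns false when the skb runs out
 * of room (the caller translates that into -EMSGSIZE); resume state is
 * kept in cb->args[] so a later dump call can continue where this one
 * stopped.
 */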
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
			  cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with a block index, q is NULL and the parent
		 * value is never used in the following code. The check in
		 * tcf_fill_node prevents it. However, the compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

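/* Fill an RTM_*CHAIN message for one chain, including the template kind
 * and the template's own payload when a template is attached.
 */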
static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
			      struct sk_buff *skb, struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = chain->tmplt_ops;
	priv = chain->tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain->index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

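/* Allocate a message, fill it with the chain's state and either unicast
 * it back to the requester or multicast it to the RTNLGRP_TC group.
 */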
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

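/* Attach a classifier template to a chain. TCA_KIND selects the
 * classifier, which must implement all three tmplt_* ops; otherwise the
 * request is rejected with -EOPNOTSUPP.
 */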
static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	void *tmplt_priv;

	/* If kind is not set, the user did not specify a template. */
	if (!tca[TCA_KIND])
		return 0;

	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

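/* Destroy a chain's template, if one is attached, and drop the module
 * reference taken when the template was created.
 */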
static void tc_chain_tmplt_del(struct tcf_chain *chain)
{
	const struct tcf_proto_ops *ops = chain->tmplt_ops;

	/* If template ops were never set, there is no template to destroy. */
	if (!ops)
		return;

	ops->tmplt_destroy(chain->tmplt_priv);
	module_put(ops->owner);
}

/* Add/delete/get a chain */
static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block;
		}
		tcf_chain_hold(chain);
	}

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err)
			goto errout;
		/* In case the chain was successfully added, take a reference
		 * to the chain. This ensures that an empty chain
		 * does not disappear at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		chain->explicitly_created = false;
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

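/* Dump all explicitly created chains of a block; chains that exist only
 * because an action references them are skipped.
 */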
/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with a block index, q is NULL and the parent
		 * value is never used in the following code. The check in
		 * tcf_fill_node prevents it. However, the compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

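/* Release all actions held by a filter's extensions. A no-op when the
 * kernel is built without CONFIG_NET_CLS_ACT.
 */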
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

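/* Parse and bind the actions attached to a filter. The legacy "police"
 * attribute is mapped onto a single TCA_OLD_COMPAT action; otherwise the
 * full action list is initialized via tcf_action_init().
 */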
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, true, extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size, true,
					      extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
		exts->net = net;
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

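/* Replace dst's extensions with src's and release whatever dst held
 * before.
 */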
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

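/* Dump a filter's actions into skb, using either the current nested
 * layout or, for TCA_OLD_COMPAT entries, the old single-police layout.
 */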
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;

			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

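/* Dump statistics for the first (and, in compat mode, only) action. */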
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

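/* Offload helper: for every action that can hand out an egress device,
 * invoke that device's egdev callbacks with the given setup type. Returns
 * the number of callbacks that succeeded, or a negative error.
 */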
static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev)
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		a->ops->put_dev(dev);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

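/* Offload entry point for classifiers: try the block's registered
 * callbacks first and fall back to the per-action egress-device callbacks
 * only when no block callback handled the request.
 */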
int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts || ok_count)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

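/* Per-netns setup and teardown of the IDR that maps block indexes to
 * shared tcf_block instances.
 */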
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

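/* Module init: create the filter workqueue, register the per-netns state
 * and hook the filter and chain message types into rtnetlink.
 */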
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);