/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
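
/* Illustrative sketch (not part of this header): a classifier's ->change()
 * typically binds its result to a class via tcf_bind_filter() once the new
 * filter is validated, and the delete/destroy path undoes this with
 * tcf_unbind_filter(). "f" is a hypothetical filter struct embedding a
 * struct tcf_result named "res":
 *
 *	// in ->change(), after parsing the classid attribute:
 *	f->res.classid = classid;
 *	tcf_bind_filter(tp, &f->res, base);
 *
 *	// in the destroy path:
 *	tcf_unbind_filter(tp, &f->res);
 */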

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
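
/* Illustrative sketch (not part of this header): tcf_exts_init() allocates
 * the action array, so callers must check its return value and release the
 * exts with tcf_exts_destroy() on later errors. TCA_FOO_ACT/TCA_FOO_POLICE
 * stand in for a classifier's real attribute IDs:
 *
 *	err = tcf_exts_init(&f->exts, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		goto errout;
 *	...
 *	tcf_exts_destroy(&f->exts);	// on the error/teardown path
 */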

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
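
/* Illustrative sketch (not part of this header): the usual pattern pairs
 * tcf_exts_get_net() with a deferred destroy via tcf_queue_work(), falling
 * back to synchronous cleanup when the netns is already being torn down.
 * "foo_delete_filter_work" and "__foo_delete_filter" are hypothetical:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *	else
 *		__foo_delete_filter(f);	// synchronous; netns is dying
 *
 * The work function must call tcf_exts_put_net() before freeing the filter.
 */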

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
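
/* Illustrative sketch (not part of this header): iterating the configured
 * actions; the loop body never runs when CONFIG_NET_CLS_ACT is disabled:
 *
 *	struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			return -EOPNOTSUPP;
 *	}
 */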

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
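
/* Illustrative sketch (not part of this header): a driver propagates
 * hardware counters back to the software actions from its stats callback,
 * e.g. while handling TC_CLSFLOWER_STATS. The hw_* counter names are
 * hypothetical:
 *
 *	case TC_CLSFLOWER_STATS:
 *		tcf_exts_stats_update(cls_flower->exts,
 *				      hw_bytes, hw_packets, hw_lastuse);
 *		break;
 */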

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
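
/* Illustrative sketch (not part of this header): a classifier's ->classify()
 * returns the tcf_exts_exec() result directly once a filter matches:
 *
 *	if (filter_matches(f, skb)) {
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 *	return -1;	// no match, fall through to the next filter
 *
 * "filter_matches" and "f" are hypothetical names.
 */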

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
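
/* Worked example for tcf_em_early_end() (illustrative): ematches are
 * evaluated strictly left to right, so for "A AND B OR C" the walk stops
 * after A when A is false (A carries TCF_EM_REL_AND), stops after B when
 * B is true (B carries TCF_EM_REL_OR), and C, which carries
 * TCF_EM_REL_END, always terminates the walk.
 */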

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
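
/* Illustrative sketch (not part of this header): a classifier that embeds
 * an ematch tree fills in tcf_pkt_info and delegates the matching. The
 * field "f->ematches" is a hypothetical embedded tcf_ematch_tree:
 *
 *	struct tcf_pkt_info info = {
 *		.ptr	 = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK),
 *		.nexthdr = 0,
 *	};
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, &info))
 *		return -1;	// tree did not match
 */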

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
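
/* Illustrative sketch (not part of this header): combining the two helpers
 * above to safely read "len" bytes at offset "off" within the network
 * header, bailing out on truncated or malformed packets:
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;	// offset outside the linear skb data
 */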

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
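
/* Illustrative sketch (not part of this header): a classifier resolves the
 * indev attribute to an ifindex at configuration time and checks it per
 * packet at classification time. TCA_FOO_INDEV and "f" are hypothetical:
 *
 *	// ->change(): parse and remember the ingress device
 *	ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *
 *	// ->classify(): skip filters bound to another ingress device
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		return -1;
 */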

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
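
/* Illustrative sketch (not part of this header): a driver's flower setup
 * callback typically gates on this helper first, so unsupported chains are
 * rejected with a useful extack message. The "foo_*" names are
 * hypothetical:
 *
 *	static int foo_setup_tc_cls_flower(struct foo_priv *priv,
 *					   struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */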

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
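
/* Worked example for tc_flags_valid() (illustrative): after masking down
 * to the two SKIP bits, the XOR test rejects exactly the "both set" case:
 *
 *	SKIP_HW	SKIP_SW	-> valid?
 *	  0	  0	   yes (offload to HW, keep SW copy)
 *	  1	  0	   yes (SW only)
 *	  0	  1	   yes (HW only)
 *	  1	  1	   no  (filter would exist nowhere)
 */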

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

#endif /* __NET_PKT_CLS_H */