/* include/net/pkt_cls.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __NET_PKT_CLS_H
  3. #define __NET_PKT_CLS_H
  4. #include <linux/pkt_cls.h>
  5. #include <linux/workqueue.h>
  6. #include <net/sch_generic.h>
  7. #include <net/act_api.h>
  8. /* Basic packet classifier frontend definitions. */
/* State handed to a classifier's ->walk() callback while iterating all
 * filters of a tcf_proto.
 */
struct tcf_walker {
	int	stop;	/* set non-zero to abort the walk */
	int	skip;	/* number of leading entries to skip */
	int	count;	/* number of entries visited so far */
	/* called once per filter node; non-zero return stops the walk */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
/* Register/unregister a classifier type (e.g. u32, flower) with the core. */
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

/* Where a block is bound on its qdisc: clsact ingress or egress. */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

/* Extended arguments for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	/* invoked whenever the head filter of chain 0 changes */
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;	/* opaque cookie for the callback */
};

struct tcf_block_cb;

/* Hand @work off to the tc filter workqueue (implemented in cls_api.c). */
bool tcf_queue_work(struct work_struct *work);
#ifdef CONFIG_NET_CLS
/* Look up (and optionally create) chain @chain_index on @block. */
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);

/* Block setup/teardown; the _ext variants take extra binder info. */
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

/* Qdisc the block is attached to. */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}

/* Net device behind the block's qdisc. */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

/* Management of per-block hardware offload callbacks; the double-underscore
 * variants skip reference counting.
 */
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv);
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);

/* Run @skb through the filter chain starting at @tp; the verdict and
 * matching class are stored in @res.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
#else
/* Stubs used when the classifier core (CONFIG_NET_CLS) is compiled out:
 * block setup/teardown succeeds trivially, lookups yield NULL, and
 * classification always falls through with TC_ACT_UNSPEC.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

/* NOTE(review): the two tc_setup_cb_block_* stubs below have no matching
 * declarations in the CONFIG_NET_CLS branch above -- presumably leftovers
 * from a removed API; confirm against cls_api.c before relying on them.
 */
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
/* Atomically exchange the class bound at *clp for @cl without any locking;
 * returns the previous binding so the caller can release it.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
/* As __cls_set_class(), but performed under the qdisc tree lock so the
 * exchange cannot race with a concurrent qdisc tree change.
 */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}
/* Bind filter result @r to the class identified by r->classid on the qdisc
 * owning @tp's block, releasing any previously bound class.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);	/* returns the old binding */
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);	/* drop old class ref */
}
  182. static inline void
  183. tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
  184. {
  185. struct Qdisc *q = tp->chain->block->q;
  186. unsigned long cl;
  187. if (!q)
  188. return;
  189. if ((cl = __cls_set_class(&r->class, 0)) != 0)
  190. q->ops->cl_ops->unbind_tcf(q, cl);
  191. }
/* Classifier extensions: the actions attached to a filter plus a map of
 * classifier-specific TLV types onto the generic extension API.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* valid entries in actions[] */
	struct tc_action **actions;	/* TCA_ACT_MAX_PRIO slots, kcalloc'd */
	struct net *net;		/* netns the exts were created in */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
/* Initialize @exts and (with CONFIG_NET_CLS_ACT) allocate its action array.
 * @action/@police are the classifier-specific TLV types to export.
 * Returns 0 on success or -ENOMEM if the action array cannot be allocated.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* maybe_get_net() yields NULL when the netns refcount is already 0 */
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}
/* Drop the netns reference taken by a successful tcf_exts_get_net(). */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
/* Append every action attached to @exts to the list @actions, preserving
 * order.  No-op when CONFIG_NET_CLS_ACT is disabled.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}
/* Fold hardware-reported byte/packet counters and @lastuse into every
 * action attached to @exts.  Preemption is disabled around the loop,
 * presumably so the per-action stats updates hit a single CPU's counters
 * -- NOTE(review): confirm against tcf_action_stats_update().
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
  265. /**
  266. * tcf_exts_has_actions - check if at least one action is present
  267. * @exts: tc filter extensions handle
  268. *
  269. * Returns true if at least one action is present.
  270. */
  271. static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
  272. {
  273. #ifdef CONFIG_NET_CLS_ACT
  274. return exts->nr_actions;
  275. #else
  276. return false;
  277. #endif
  278. }
  279. /**
  280. * tcf_exts_has_one_action - check if exactly one action is present
  281. * @exts: tc filter extensions handle
  282. *
  283. * Returns true if exactly one action is present.
  284. */
  285. static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
  286. {
  287. #ifdef CONFIG_NET_CLS_ACT
  288. return exts->nr_actions == 1;
  289. #else
  290. return false;
  291. #endif
  292. }
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* Without CONFIG_NET_CLS_ACT the #ifdef above compiles away and
	 * execution falls through to an unconditional TC_ACT_OK.
	 */
	return TC_ACT_OK;
}
/* Parse/validate action attributes into @exts, tear them down, transfer
 * them between handles, and dump them (and their stats) to netlink.
 * Implemented in net/sched/cls_api.c.
 */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
/* True if the ematch is a container for a subtree (it has no ops). */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Non-zero if the match data is stored inline (TCF_EM_SIMPLE). */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Non-zero if the match result must be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}
  358. static inline int tcf_em_last_match(struct tcf_ematch *em)
  359. {
  360. return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
  361. }
  362. static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
  363. {
  364. if (tcf_em_last_match(em))
  365. return 1;
  366. if (result == 0 && em->flags & TCF_EM_REL_AND)
  367. return 1;
  368. if (result != 0 && em->flags & TCF_EM_REL_OR)
  369. return 1;
  370. return 0;
  371. }
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
/* Registration of ematch modules and construction/teardown/dump of ematch
 * trees from netlink attributes.  Implemented in net/sched/ematch.c.
 */
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
  414. /**
  415. * tcf_em_tree_match - evaulate an ematch tree
  416. *
  417. * @skb: socket buffer of the packet in question
  418. * @tree: ematch tree to be used for evaluation
  419. * @info: packet information examined by classifier
  420. *
  421. * This function matches @skb against the ematch tree in @tree by going
  422. * through all ematches respecting their logic relations returning
  423. * as soon as the result is obvious.
  424. *
  425. * Returns 1 if the ematch tree as-one matches, no ematches are configured
  426. * or ematch is not enabled in the kernel, otherwise 0 is returned.
  427. */
  428. static inline int tcf_em_tree_match(struct sk_buff *skb,
  429. struct tcf_ematch_tree *tree,
  430. struct tcf_pkt_info *info)
  431. {
  432. if (tree->hdr.nmatches)
  433. return __tcf_em_tree_match(skb, tree, info);
  434. else
  435. return 1;
  436. }
/* Module alias so ematch modules can be demand-loaded by kind. */
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

/* Empty tree handle and no-op stubs when ematch support is compiled out;
 * tcf_em_tree_match() then reports a match unconditionally.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
  446. static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
  447. {
  448. switch (layer) {
  449. case TCF_LAYER_LINK:
  450. return skb->data;
  451. case TCF_LAYER_NETWORK:
  452. return skb_network_header(skb);
  453. case TCF_LAYER_TRANSPORT:
  454. return skb_transport_header(skb);
  455. }
  456. return NULL;
  457. }
/* Check that [ptr, ptr + len) lies entirely within @skb's linear data.
 * The final (ptr <= ptr + len) test rejects negative/huge @len values that
 * would wrap the end pointer past @ptr.
 * NOTE(review): forming an out-of-range pointer is technically undefined
 * behaviour in ISO C; this relies on the kernel's flat address model.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

/* Resolve the TCA_*_INDEV device-name attribute @indev_tlv to an ifindex
 * in @net.  Returns the ifindex, -EINVAL if the name does not fit in
 * IFNAMSIZ, or -ENODEV if no such device exists.  Uses __dev_get_by_name(),
 * so the caller must hold RTNL.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
  479. static inline bool
  480. tcf_match_indev(struct sk_buff *skb, int ifindex)
  481. {
  482. if (!ifindex)
  483. return true;
  484. if (!skb->skb_iif)
  485. return false;
  486. return ifindex == skb->skb_iif;
  487. }
  488. #endif /* CONFIG_NET_CLS_IND */
/* Run the offload callbacks registered for @block with @type/@type_data;
 * with @err_stop the first callback error aborts the walk.  Implemented in
 * net/sched/cls_api.c.
 */
int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

/* ndo_setup_tc payload for TC_SETUP_BLOCK: (un)bind a driver to a block. */
struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};

/* Header shared by all classifier offload payloads below. */
struct tc_cls_common_offload {
	u32 chain_index;	/* chain the filter lives on */
	__be16 protocol;	/* ethertype the filter applies to */
	u32 prio;		/* filter priority */
};
/* Populate the common offload header from the filter's tcf_proto. */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
}
/* Key node of a u32 filter as presented to drivers for offload. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;		/* selector: keys, offsets, flags */
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;		/* handle of a linked hash table, if any */
	u8 fshift;
};

/* Hash node (hash table) of a u32 filter for offload. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;		/* number of hash buckets */
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* ndo_setup_tc payload for TC_SETUP_CLSU32; @command selects which union
 * member is valid.
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
  544. static inline bool tc_can_offload(const struct net_device *dev)
  545. {
  546. return dev->features & NETIF_F_HW_TC;
  547. }
  548. static inline bool tc_skip_hw(u32 flags)
  549. {
  550. return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
  551. }
  552. static inline bool tc_skip_sw(u32 flags)
  553. {
  554. return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
  555. }
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	/* Reject any unknown flag bits. */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	/* Only SKIP_* bits can remain here, so XOR against both bits is
	 * zero exactly when both are set -- the invalid combination.
	 */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
  565. static inline bool tc_in_hw(u32 flags)
  566. {
  567. return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
  568. }
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* ndo_setup_tc payload for TC_SETUP_CLSFLOWER.  @cookie identifies the
 * filter across calls; @dissector/@mask/@key describe the flow match.
 */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* ndo_setup_tc payload for TC_SETUP_CLSMATCHALL. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* ndo_setup_tc payload for TC_SETUP_CLSBPF. */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;	/* direct-action mode: verdict from the prog */
	u32 gen_flags;
};

/* ndo_setup_tc payload for TC_SETUP_QDISC_MQPRIO. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;	/* kmalloc'd copy of the user-supplied cookie */
	u32 len;	/* length of @data in bytes */
};
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

/* RED parameters handed to the driver on TC_RED_REPLACE. */
struct tc_red_qopt_offload_params {
	u32 min;		/* queue length thresholds in bytes */
	u32 max;
	u32 probability;
	bool is_ecn;		/* mark (ECN) instead of drop */
};

/* Destination pointers the driver fills on TC_RED_STATS. */
struct tc_red_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

/* ndo_setup_tc payload for TC_SETUP_QDISC_RED; @command selects which
 * union member is valid.
 */
struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_red_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

#endif /* __NET_PKT_CLS_H */