pkt_cls.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct work_struct *work);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
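
/* A minimal usage sketch (not part of this header): a classful qdisc would
 * typically acquire its filter block in ->init() and release it in
 * ->destroy(); "example_sched_data" is a hypothetical private struct.
 *
 *	struct example_sched_data *q = qdisc_priv(sch);
 *	int err;
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 */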

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv);
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
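
/* A minimal usage sketch (hedged): a driver's ndo_setup_tc() handler for
 * TC_SETUP_BLOCK would register a callback on bind and unregister it on
 * unbind; "example_setup_cb" and "priv" are hypothetical names.
 *
 *	struct tc_block_offload *f = type_data;
 *
 *	switch (f->command) {
 *	case TC_BLOCK_BIND:
 *		return tcf_block_cb_register(f->block, example_setup_cb,
 *					     priv, priv);
 *	case TC_BLOCK_UNBIND:
 *		tcf_block_cb_unregister(f->block, example_setup_cb, priv);
 *		return 0;
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */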

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif /* CONFIG_NET_CLS */

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
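
/* A minimal usage sketch (hedged): a classifier's ->change() typically binds
 * the new result to its class once the classid attribute has been parsed,
 * and ->destroy() unbinds it; "n", "tb" and TCA_EXAMPLE_CLASSID are
 * hypothetical names.
 *
 *	if (tb[TCA_EXAMPLE_CLASSID]) {
 *		n->res.classid = nla_get_u32(tb[TCA_EXAMPLE_CLASSID]);
 *		tcf_bind_filter(tp, &n->res, base);
 *	}
 *	...
 *	tcf_unbind_filter(tp, &n->res);
 */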

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
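
/* A minimal lifecycle sketch (hedged): classifiers usually initialize the
 * extensions, validate the netlink attributes into them, and destroy them on
 * failure or teardown; TCA_EXAMPLE_ACT and TCA_EXAMPLE_POLICE are
 * hypothetical attribute names.
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
 *	if (err)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, rate_tlv, &e, ovr, extack);
 *	if (err)
 *		goto errout;
 *	...
 * errout:
 *	tcf_exts_destroy(&e);
 *	return err;
 */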

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
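
/* A minimal usage sketch (hedged): classifiers that free filters from a
 * workqueue take the netns reference first and fall back to synchronous
 * cleanup when the netns is already going away;
 * "example_delete_filter_work" and "f" are hypothetical.
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->work);	// worker calls tcf_exts_put_net()
 *	else
 *		example_delete_filter_work(f);	// synchronous cleanup
 */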

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
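
/* A minimal usage sketch (hedged): a driver handling a *_STATS offload
 * command would push its hardware counters back through this helper; the
 * hw_* counter names are hypothetical.
 *
 *	struct tc_cls_flower_offload *f = type_data;
 *
 *	if (f->command == TC_CLSFLOWER_STATS)
 *		tcf_exts_stats_update(f->exts, hw_bytes, hw_packets,
 *				      hw_lastused);
 */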

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
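
/* A minimal usage sketch (hedged): a classifier's ->classify() returns the
 * result of tcf_exts_exec() once a filter has matched, so action verdicts
 * propagate to the caller; "f" is a hypothetical matched filter.
 *
 *	*res = f->res;
 *	return tcf_exts_exec(skb, &f->exts, res);
 */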

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

/* Decide whether evaluation can stop after @em: either it is the last match
 * in the chain, or the boolean result is already determined (a failed match
 * followed by AND, or a successful match followed by OR, short-circuits).
 */
static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
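
/* A hedged sketch of how an evaluation loop can use this helper (the real
 * implementation, which also handles containers, lives in
 * net/sched/ematch.c as __tcf_em_tree_match()); "example_match_one" is a
 * hypothetical per-ematch predicate.
 *
 *	for (i = 0; i < tree->hdr.nmatches; i++) {
 *		struct tcf_ematch *em = &tree->matches[i];
 *
 *		res = example_match_one(skb, em, info);
 *		if (tcf_em_is_inverted(em))
 *			res = !res;
 *		if (tcf_em_early_end(em, res))
 *			break;
 *	}
 *	return res;
 */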

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured, or ematch is not enabled in the kernel; otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
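
/* A minimal usage sketch (hedged): classifiers that support ematches (e.g.
 * cls_basic) evaluate the tree from their ->classify() path; "f" is a
 * hypothetical filter with an embedded ematch tree.
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;	// this filter does not match, try the next
 */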

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	/* The final comparison rejects pointer-arithmetic overflow. */
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
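
/* A minimal usage sketch (hedged): an ematch or classifier validates an
 * offset before dereferencing packet bytes; "off" and the 2-byte load are
 * hypothetical.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, sizeof(u16)))
 *		return 0;
 *	val = get_unaligned_be16(ptr);
 */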

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
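
/* A minimal usage sketch (hedged): a classifier resolves an indev attribute
 * to an ifindex at configuration time and compares it against skb->skb_iif
 * at classification time; TCA_EXAMPLE_INDEV and "f->ifindex" are
 * hypothetical names.
 *
 *	ret = tcf_change_indev(net, tb[TCA_EXAMPLE_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *	...
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */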

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	/* After the mask check above, the XOR is zero only when both
	 * SKIP_HW and SKIP_SW are set, which is invalid.
	 */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags))
		cls_common->extack = extack;
}
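
/* A minimal usage sketch (hedged): a classifier fills the common fields and
 * dispatches the request to all registered block callbacks; the u32 command
 * shown here is one example.
 *
 *	struct tc_cls_u32_offload cls_u32 = {};
 *
 *	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 *	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 *	...
 *	err = tc_setup_cb_call(block, exts, TC_SETUP_CLSU32, &cls_u32,
 *			       tc_skip_sw(flags));
 */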

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds a cookie that is passed from userspace
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

#endif /* __NET_PKT_CLS_H */