spectrum_flower.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402
  1. /*
  2. * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
  3. * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * 2. Redistributions in binary form must reproduce the above copyright
  12. * notice, this list of conditions and the following disclaimer in the
  13. * documentation and/or other materials provided with the distribution.
  14. * 3. Neither the names of the copyright holders nor the names of its
  15. * contributors may be used to endorse or promote products derived from
  16. * this software without specific prior written permission.
  17. *
  18. * Alternatively, this software may be distributed under the terms of the
  19. * GNU General Public License ("GPL") version 2 as published by the Free
  20. * Software Foundation.
  21. *
  22. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  23. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  24. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  26. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  28. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  29. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  30. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  31. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  32. * POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/errno.h>
  36. #include <linux/netdevice.h>
  37. #include <net/flow_dissector.h>
  38. #include <net/pkt_cls.h>
  39. #include <net/tc_act/tc_gact.h>
  40. #include <net/tc_act/tc_mirred.h>
  41. #include <net/tc_act/tc_vlan.h>
  42. #include "spectrum.h"
  43. #include "core_acl_flex_keys.h"
  44. static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
  45. struct net_device *dev,
  46. struct mlxsw_sp_acl_rule_info *rulei,
  47. struct tcf_exts *exts)
  48. {
  49. const struct tc_action *a;
  50. LIST_HEAD(actions);
  51. int err;
  52. if (tc_no_actions(exts))
  53. return 0;
  54. /* Count action is inserted first */
  55. err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
  56. if (err)
  57. return err;
  58. tcf_exts_to_list(exts, &actions);
  59. list_for_each_entry(a, &actions, list) {
  60. if (is_tcf_gact_shot(a)) {
  61. err = mlxsw_sp_acl_rulei_act_drop(rulei);
  62. if (err)
  63. return err;
  64. } else if (is_tcf_mirred_egress_redirect(a)) {
  65. int ifindex = tcf_mirred_ifindex(a);
  66. struct net_device *out_dev;
  67. err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
  68. MLXSW_SP_DUMMY_FID);
  69. if (err)
  70. return err;
  71. out_dev = __dev_get_by_index(dev_net(dev), ifindex);
  72. if (out_dev == dev)
  73. out_dev = NULL;
  74. err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
  75. out_dev);
  76. if (err)
  77. return err;
  78. } else if (is_tcf_vlan(a)) {
  79. u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
  80. u32 action = tcf_vlan_action(a);
  81. u8 prio = tcf_vlan_push_prio(a);
  82. u16 vid = tcf_vlan_push_vid(a);
  83. return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
  84. action, vid,
  85. proto, prio);
  86. } else {
  87. dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
  88. return -EOPNOTSUPP;
  89. }
  90. }
  91. return 0;
  92. }
  93. static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
  94. struct tc_cls_flower_offload *f)
  95. {
  96. struct flow_dissector_key_ipv4_addrs *key =
  97. skb_flow_dissector_target(f->dissector,
  98. FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  99. f->key);
  100. struct flow_dissector_key_ipv4_addrs *mask =
  101. skb_flow_dissector_target(f->dissector,
  102. FLOW_DISSECTOR_KEY_IPV4_ADDRS,
  103. f->mask);
  104. mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
  105. ntohl(key->src), ntohl(mask->src));
  106. mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
  107. ntohl(key->dst), ntohl(mask->dst));
  108. }
  109. static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
  110. struct tc_cls_flower_offload *f)
  111. {
  112. struct flow_dissector_key_ipv6_addrs *key =
  113. skb_flow_dissector_target(f->dissector,
  114. FLOW_DISSECTOR_KEY_IPV6_ADDRS,
  115. f->key);
  116. struct flow_dissector_key_ipv6_addrs *mask =
  117. skb_flow_dissector_target(f->dissector,
  118. FLOW_DISSECTOR_KEY_IPV6_ADDRS,
  119. f->mask);
  120. size_t addr_half_size = sizeof(key->src) / 2;
  121. mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
  122. &key->src.s6_addr[0],
  123. &mask->src.s6_addr[0],
  124. addr_half_size);
  125. mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
  126. &key->src.s6_addr[addr_half_size],
  127. &mask->src.s6_addr[addr_half_size],
  128. addr_half_size);
  129. mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
  130. &key->dst.s6_addr[0],
  131. &mask->dst.s6_addr[0],
  132. addr_half_size);
  133. mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
  134. &key->dst.s6_addr[addr_half_size],
  135. &mask->dst.s6_addr[addr_half_size],
  136. addr_half_size);
  137. }
  138. static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
  139. struct mlxsw_sp_acl_rule_info *rulei,
  140. struct tc_cls_flower_offload *f,
  141. u8 ip_proto)
  142. {
  143. struct flow_dissector_key_ports *key, *mask;
  144. if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
  145. return 0;
  146. if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
  147. dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
  148. return -EINVAL;
  149. }
  150. key = skb_flow_dissector_target(f->dissector,
  151. FLOW_DISSECTOR_KEY_PORTS,
  152. f->key);
  153. mask = skb_flow_dissector_target(f->dissector,
  154. FLOW_DISSECTOR_KEY_PORTS,
  155. f->mask);
  156. mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
  157. ntohs(key->dst), ntohs(mask->dst));
  158. mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
  159. ntohs(key->src), ntohs(mask->src));
  160. return 0;
  161. }
/* Translate a flower classifier (match keys + actions) into an ACL rule.
 * Rejects filters using dissector keys the hardware cannot match on, then
 * programs each supported key/mask pair into @rulei and finally parses the
 * attached actions.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP for unsupported
 * keys/actions).
 */
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	/* Bail out early if the filter matches on anything beyond the keys
	 * handled below.
	 */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->prio);

	/* The control key tells us whether IPv4 or IPv6 addresses follow;
	 * remember it for the address parsing at the bottom.
	 */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		u16 n_proto_key = ntohs(key->n_proto);
		u16 n_proto_mask = ntohs(mask->n_proto);

		/* ETH_P_ALL means "any protocol" — match it with an empty
		 * key/mask pair.
		 */
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		/* Saved so the ports parser can verify TCP/UDP. */
		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC,
					       key->dst, mask->dst,
					       sizeof(key->dst));
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC,
					       key->src, mask->src,
					       sizeof(key->src));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		/* VID and PCP are only programmed when actually matched on. */
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);
	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
}
  259. int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
  260. __be16 protocol, struct tc_cls_flower_offload *f)
  261. {
  262. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  263. struct net_device *dev = mlxsw_sp_port->dev;
  264. struct mlxsw_sp_acl_rule_info *rulei;
  265. struct mlxsw_sp_acl_ruleset *ruleset;
  266. struct mlxsw_sp_acl_rule *rule;
  267. int err;
  268. ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
  269. MLXSW_SP_ACL_PROFILE_FLOWER);
  270. if (IS_ERR(ruleset))
  271. return PTR_ERR(ruleset);
  272. rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
  273. if (IS_ERR(rule)) {
  274. err = PTR_ERR(rule);
  275. goto err_rule_create;
  276. }
  277. rulei = mlxsw_sp_acl_rule_rulei(rule);
  278. err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
  279. if (err)
  280. goto err_flower_parse;
  281. err = mlxsw_sp_acl_rulei_commit(rulei);
  282. if (err)
  283. goto err_rulei_commit;
  284. err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
  285. if (err)
  286. goto err_rule_add;
  287. mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
  288. return 0;
  289. err_rule_add:
  290. err_rulei_commit:
  291. err_flower_parse:
  292. mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
  293. err_rule_create:
  294. mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
  295. return err;
  296. }
  297. void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
  298. struct tc_cls_flower_offload *f)
  299. {
  300. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  301. struct mlxsw_sp_acl_ruleset *ruleset;
  302. struct mlxsw_sp_acl_rule *rule;
  303. ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
  304. ingress,
  305. MLXSW_SP_ACL_PROFILE_FLOWER);
  306. if (IS_ERR(ruleset))
  307. return;
  308. rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
  309. if (rule) {
  310. mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
  311. mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
  312. }
  313. mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
  314. }
  315. int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
  316. struct tc_cls_flower_offload *f)
  317. {
  318. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  319. struct mlxsw_sp_acl_ruleset *ruleset;
  320. struct mlxsw_sp_acl_rule *rule;
  321. struct tc_action *a;
  322. LIST_HEAD(actions);
  323. u64 packets;
  324. u64 lastuse;
  325. u64 bytes;
  326. int err;
  327. ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
  328. ingress,
  329. MLXSW_SP_ACL_PROFILE_FLOWER);
  330. if (WARN_ON(IS_ERR(ruleset)))
  331. return -EINVAL;
  332. rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
  333. if (!rule)
  334. return -EINVAL;
  335. err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
  336. &lastuse);
  337. if (err)
  338. goto err_rule_get_stats;
  339. preempt_disable();
  340. tcf_exts_to_list(f->exts, &actions);
  341. list_for_each_entry(a, &actions, list)
  342. tcf_action_stats_update(a, bytes, packets, lastuse);
  343. preempt_enable();
  344. mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
  345. return 0;
  346. err_rule_get_stats:
  347. mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
  348. return err;
  349. }