bnxt_tc.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
#define BNXT_FID_INVALID		0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))
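/* VLAN_TCI() builds a host-order 802.1Q TCI: the 3-bit priority (PCP) lands
 * in bits 15:13 (VLAN_PRIO_SHIFT == 13) and the 12-bit VLAN ID in bits 11:0.
 * For example, VLAN_TCI(100, 5) == 0xa064. Callers below convert the result
 * to big-endian with cpu_to_be16().
 */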
/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (dev != pf_bp->dev)
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}
static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	int ifindex = tcf_mirred_ifindex(tc_act);
	struct net_device *dev;
	u16 dst_fid;

	dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
	if (!dev) {
		netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
		return -EINVAL;
	}

	/* find the FID from dev */
	dst_fid = bnxt_flow_get_dst_fid(bp, dev);
	if (dst_fid == BNXT_FID_INVALID) {
		netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_fid = dst_fid;
	actions->dst_dev = dev;
	return 0;
}
static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}
static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	LIST_HEAD(tc_actions);
	int rc;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_to_list(tc_exts, &tc_actions);
	list_for_each_entry(tc_act, &tc_actions, list) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}
	}

	return 0;
}
#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)
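/* GET_KEY()/GET_MASK() return a pointer to the flow_dissector_key_* struct
 * for the given FLOW_DISSECTOR_KEY_* id, taken from the key blob and the
 * mask blob of the tc flower offload command respectively.
 */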
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);
	return rc;
}
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}
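/* ipv6_mask_len() sums the prefix length of each 32-bit word of the mask, so
 * a /64 mask (ffff:ffff:ffff:ffff::) yields 32 + 32 + 0 + 0 = 64. As with
 * the IPv4 path, this only gives a meaningful length for contiguous
 * prefix-style masks.
 */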
static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;
	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
  277. if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
  278. is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);

	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;

	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and it will wrap around beyond that width.
 */
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)		((x) & (mask))
#define high_bits(x, mask)		((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}
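/* Worked example with an 8-bit counter (mask == 0xff): if the accumulated
 * value is 0x1f0 (i.e. HW last reported 0xf0) and the new HW reading is 0x05,
 * the counter must have wrapped (0x05 < 0xf0). accumulate_val() computes
 * high_bits(0x1f0) + 0x05 = 0x105 and then adds mask + 1 (0x100), giving
 * 0x205 -- the old 0x1f0 plus the 0x15 the counter actually advanced.
 */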
/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info,
					   struct bnxt_tc_flow_stats *stats,
					   struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}
/* Fix possible wraparound of the stats queried from HW, calculate
 * the delta from prev_stats, and also update the prev_stats.
 * The HW flow stats are fetched under the hwrm_cmd_lock mutex.
 * This routine is best called while under the mutex so that the
 * stats processing happens atomically.
 */
static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
				 struct bnxt_tc_flow *flow,
				 struct bnxt_tc_flow_stats *stats)
{
	struct bnxt_tc_flow_stats *acc_stats, *prev_stats;

	acc_stats = &flow->stats;
	bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats);

	prev_stats = &flow->prev_stats;
	stats->bytes = acc_stats->bytes - prev_stats->bytes;
	stats->packets = acc_stats->packets - prev_stats->packets;
	*prev_stats = *acc_stats;
}
static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp,
					__le16 flow_handle,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_flow_stats *stats)
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(1);
	req.flow_handle_0 = flow_handle;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		stats->packets = le64_to_cpu(resp->packet_0);
		stats->bytes = le64_to_cpu(resp->byte_0);
		bnxt_flow_stats_calc(&bp->tc_info, flow, stats);
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}
static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree(l2_node);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}
/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}
/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}
	return true;
}
static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}
/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;
	flow->src_fid = src_fid;

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      &new_node->flow_handle);
	if (rc)
		goto put_l2;

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree(new_node);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}
static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}
	return __bnxt_tc_del_flow(bp, flow_node);
}
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow_stats stats;
	int rc;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -1;
	}

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle,
					  &flow_node->flow, &stats);
	if (rc)
		return rc;

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0);
	return 0;
}
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	int rc = 0;

	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
		break;

	case TC_CLSFLOWER_DESTROY:
		rc = bnxt_tc_del_flow(bp, cls_flower);
		break;

	case TC_CLSFLOWER_STATS:
		rc = bnxt_tc_get_flow_stats(bp, cls_flower);
		break;
	}

	return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};
/* convert counter width in bits to a mask */
#define mask(width)		((u64)~0 >> (64 - (width)))
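/* For example, mask(36) == 0xfffffffff (2^36 - 1) and mask(28) == 0xfffffff,
 * matching the byte and packet counter masks set in bnxt_init_tc() below.
 */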
int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10800) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		return rc;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	return 0;

destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
	return rc;
}
void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;

	if (!tc_info->enabled)
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
}
#else
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */