bnxt_tc.c

  1. /* Broadcom NetXtreme-C/E network driver.
  2. *
  3. * Copyright (c) 2017 Broadcom Limited
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation.
  8. */
  9. #include <linux/netdevice.h>
  10. #include <linux/inetdevice.h>
  11. #include <linux/if_vlan.h>
  12. #include <net/flow_dissector.h>
  13. #include <net/pkt_cls.h>
  14. #include <net/tc_act/tc_gact.h>
  15. #include <net/tc_act/tc_skbedit.h>
  16. #include <net/tc_act/tc_mirred.h>
  17. #include <net/tc_act/tc_vlan.h>
  18. #include <net/tc_act/tc_tunnel_key.h>
  19. #include "bnxt_hsi.h"
  20. #include "bnxt.h"
  21. #include "bnxt_sriov.h"
  22. #include "bnxt_tc.h"
  23. #include "bnxt_vfr.h"
  24. #define BNXT_FID_INVALID 0xffff
  25. #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
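/* Illustrative example (not part of the driver): with the kernel's
 * VLAN_PRIO_SHIFT of 13, VLAN_TCI(5, 3) = 5 | (3 << 13) = 0x6005,
 * i.e. VLAN ID 5 with priority 3 packed into a 16-bit TCI.
 */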
  26. /* Return the dst fid of the func for flow forwarding
  27. * For PFs: src_fid is the fid of the PF
  28. * For VF-reps: src_fid is the fid of the VF
  29. */
  30. static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
  31. {
  32. struct bnxt *bp;
  33. /* check if dev belongs to the same switch */
  34. if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
  35. netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
  36. dev->ifindex);
  37. return BNXT_FID_INVALID;
  38. }
  39. /* Is dev a VF-rep? */
  40. if (bnxt_dev_is_vf_rep(dev))
  41. return bnxt_vf_rep_get_fid(dev);
  42. bp = netdev_priv(dev);
  43. return bp->pf.fw_fid;
  44. }
  45. static int bnxt_tc_parse_redir(struct bnxt *bp,
  46. struct bnxt_tc_actions *actions,
  47. const struct tc_action *tc_act)
  48. {
  49. struct net_device *dev = tcf_mirred_dev(tc_act);
  50. if (!dev) {
  51. netdev_info(bp->dev, "no dev in mirred action");
  52. return -EINVAL;
  53. }
  54. actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
  55. actions->dst_dev = dev;
  56. return 0;
  57. }
  58. static void bnxt_tc_parse_vlan(struct bnxt *bp,
  59. struct bnxt_tc_actions *actions,
  60. const struct tc_action *tc_act)
  61. {
  62. if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
  63. actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
  64. } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
  65. actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
  66. actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
  67. actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
  68. }
  69. }
  70. static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
  71. struct bnxt_tc_actions *actions,
  72. const struct tc_action *tc_act)
  73. {
  74. struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
  75. struct ip_tunnel_key *tun_key = &tun_info->key;
  76. if (ip_tunnel_info_af(tun_info) != AF_INET) {
  77. netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
  78. return -EOPNOTSUPP;
  79. }
  80. actions->tun_encap_key = *tun_key;
  81. actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
  82. return 0;
  83. }
  84. static int bnxt_tc_parse_actions(struct bnxt *bp,
  85. struct bnxt_tc_actions *actions,
  86. struct tcf_exts *tc_exts)
  87. {
  88. const struct tc_action *tc_act;
  89. LIST_HEAD(tc_actions);
  90. int rc;
  91. if (!tcf_exts_has_actions(tc_exts)) {
  92. netdev_info(bp->dev, "no actions");
  93. return -EINVAL;
  94. }
  95. tcf_exts_to_list(tc_exts, &tc_actions);
  96. list_for_each_entry(tc_act, &tc_actions, list) {
  97. /* Drop action */
  98. if (is_tcf_gact_shot(tc_act)) {
  99. actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
  100. return 0; /* don't bother with other actions */
  101. }
  102. /* Redirect action */
  103. if (is_tcf_mirred_egress_redirect(tc_act)) {
  104. rc = bnxt_tc_parse_redir(bp, actions, tc_act);
  105. if (rc)
  106. return rc;
  107. continue;
  108. }
  109. /* Push/pop VLAN */
  110. if (is_tcf_vlan(tc_act)) {
  111. bnxt_tc_parse_vlan(bp, actions, tc_act);
  112. continue;
  113. }
  114. /* Tunnel encap */
  115. if (is_tcf_tunnel_set(tc_act)) {
  116. rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
  117. if (rc)
  118. return rc;
  119. continue;
  120. }
  121. /* Tunnel decap */
  122. if (is_tcf_tunnel_release(tc_act)) {
  123. actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
  124. continue;
  125. }
  126. }
  127. if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
  128. if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
  129. /* dst_fid is PF's fid */
  130. actions->dst_fid = bp->pf.fw_fid;
  131. } else {
  132. /* find the FID from dst_dev */
  133. actions->dst_fid =
  134. bnxt_flow_get_dst_fid(bp, actions->dst_dev);
  135. if (actions->dst_fid == BNXT_FID_INVALID)
  136. return -EINVAL;
  137. }
  138. }
  139. return 0;
  140. }
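/* Illustrative examples only; interface names are placeholders. The
 * action flags parsed above typically come from tc flower rules such as:
 *
 *   tc filter add dev pf0vf0 ingress flower \
 *       dst_mac 52:54:00:12:34:56 action drop
 *           -> BNXT_TC_ACTION_FLAG_DROP
 *
 *   tc filter add dev pf0vf0 ingress protocol ip flower ip_proto tcp \
 *       action mirred egress redirect dev pf0vf1
 *           -> BNXT_TC_ACTION_FLAG_FWD, dst_dev = pf0vf1
 *
 *   ... action vlan pop
 *           -> BNXT_TC_ACTION_FLAG_POP_VLAN
 *
 *   ... action tunnel_key set id 42 src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *       dst_port 4789 action mirred egress redirect dev vxlan0
 *           -> BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP plus FWD (dst_fid = PF)
 */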
  141. #define GET_KEY(flow_cmd, key_type) \
  142. skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
  143. (flow_cmd)->key)
  144. #define GET_MASK(flow_cmd, key_type) \
  145. skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
  146. (flow_cmd)->mask)
  147. static int bnxt_tc_parse_flow(struct bnxt *bp,
  148. struct tc_cls_flower_offload *tc_flow_cmd,
  149. struct bnxt_tc_flow *flow)
  150. {
  151. struct flow_dissector *dissector = tc_flow_cmd->dissector;
  152. u16 addr_type = 0;
  153. /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
  154. if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
  155. (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
  156. netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
  157. dissector->used_keys);
  158. return -EOPNOTSUPP;
  159. }
  160. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
  161. struct flow_dissector_key_control *key =
  162. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
  163. addr_type = key->addr_type;
  164. }
  165. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
  166. struct flow_dissector_key_basic *key =
  167. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
  168. struct flow_dissector_key_basic *mask =
  169. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
  170. flow->l2_key.ether_type = key->n_proto;
  171. flow->l2_mask.ether_type = mask->n_proto;
  172. if (key->n_proto == htons(ETH_P_IP) ||
  173. key->n_proto == htons(ETH_P_IPV6)) {
  174. flow->l4_key.ip_proto = key->ip_proto;
  175. flow->l4_mask.ip_proto = mask->ip_proto;
  176. }
  177. }
  178. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
  179. struct flow_dissector_key_eth_addrs *key =
  180. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
  181. struct flow_dissector_key_eth_addrs *mask =
  182. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
  183. flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
  184. ether_addr_copy(flow->l2_key.dmac, key->dst);
  185. ether_addr_copy(flow->l2_mask.dmac, mask->dst);
  186. ether_addr_copy(flow->l2_key.smac, key->src);
  187. ether_addr_copy(flow->l2_mask.smac, mask->src);
  188. }
  189. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
  190. struct flow_dissector_key_vlan *key =
  191. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
  192. struct flow_dissector_key_vlan *mask =
  193. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
  194. flow->l2_key.inner_vlan_tci =
  195. cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
  196. flow->l2_mask.inner_vlan_tci =
  197. cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
  198. flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
  199. flow->l2_mask.inner_vlan_tpid = htons(0xffff);
  200. flow->l2_key.num_vlans = 1;
  201. }
  202. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
  203. struct flow_dissector_key_ipv4_addrs *key =
  204. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
  205. struct flow_dissector_key_ipv4_addrs *mask =
  206. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
  207. flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
  208. flow->l3_key.ipv4.daddr.s_addr = key->dst;
  209. flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
  210. flow->l3_key.ipv4.saddr.s_addr = key->src;
  211. flow->l3_mask.ipv4.saddr.s_addr = mask->src;
  212. } else if (dissector_uses_key(dissector,
  213. FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
  214. struct flow_dissector_key_ipv6_addrs *key =
  215. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
  216. struct flow_dissector_key_ipv6_addrs *mask =
  217. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
  218. flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
  219. flow->l3_key.ipv6.daddr = key->dst;
  220. flow->l3_mask.ipv6.daddr = mask->dst;
  221. flow->l3_key.ipv6.saddr = key->src;
  222. flow->l3_mask.ipv6.saddr = mask->src;
  223. }
  224. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
  225. struct flow_dissector_key_ports *key =
  226. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
  227. struct flow_dissector_key_ports *mask =
  228. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
  229. flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
  230. flow->l4_key.ports.dport = key->dst;
  231. flow->l4_mask.ports.dport = mask->dst;
  232. flow->l4_key.ports.sport = key->src;
  233. flow->l4_mask.ports.sport = mask->src;
  234. }
  235. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
  236. struct flow_dissector_key_icmp *key =
  237. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
  238. struct flow_dissector_key_icmp *mask =
  239. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
  240. flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
  241. flow->l4_key.icmp.type = key->type;
  242. flow->l4_key.icmp.code = key->code;
  243. flow->l4_mask.icmp.type = mask->type;
  244. flow->l4_mask.icmp.code = mask->code;
  245. }
  246. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
  247. struct flow_dissector_key_control *key =
  248. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
  249. addr_type = key->addr_type;
  250. }
  251. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
  252. struct flow_dissector_key_ipv4_addrs *key =
  253. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
  254. struct flow_dissector_key_ipv4_addrs *mask =
  255. GET_MASK(tc_flow_cmd,
  256. FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
  257. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
  258. flow->tun_key.u.ipv4.dst = key->dst;
  259. flow->tun_mask.u.ipv4.dst = mask->dst;
  260. flow->tun_key.u.ipv4.src = key->src;
  261. flow->tun_mask.u.ipv4.src = mask->src;
  262. } else if (dissector_uses_key(dissector,
  263. FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
  264. return -EOPNOTSUPP;
  265. }
  266. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
  267. struct flow_dissector_key_keyid *key =
  268. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
  269. struct flow_dissector_key_keyid *mask =
  270. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
  271. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
  272. flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
  273. flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
  274. }
  275. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
  276. struct flow_dissector_key_ports *key =
  277. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
  278. struct flow_dissector_key_ports *mask =
  279. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
  280. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
  281. flow->tun_key.tp_dst = key->dst;
  282. flow->tun_mask.tp_dst = mask->dst;
  283. flow->tun_key.tp_src = key->src;
  284. flow->tun_mask.tp_src = mask->src;
  285. }
  286. return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
  287. }
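/* Illustrative example (not part of the driver; interface name is a
 * placeholder): a rule such as
 *
 *   tc filter add dev pf0vf0 ingress protocol ip flower \
 *       ip_proto udp dst_ip 192.168.10.2 dst_port 4789 action drop
 *
 * arrives here with the CONTROL, BASIC, IPV4_ADDRS and PORTS dissector
 * keys populated, so the parser sets l2_key.ether_type = ETH_P_IP,
 * l4_key.ip_proto = IPPROTO_UDP, fills l3_key/l3_mask and
 * l4_key/l4_mask, and sets BNXT_TC_FLOW_FLAGS_IPV4_ADDRS and
 * BNXT_TC_FLOW_FLAGS_PORTS in flow->flags.
 */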
  288. static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
  289. {
  290. struct hwrm_cfa_flow_free_input req = { 0 };
  291. int rc;
  292. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
  293. req.flow_handle = flow_handle;
  294. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  295. if (rc)
  296. netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
  297. __func__, flow_handle, rc);
  298. if (rc)
  299. rc = -EIO;
  300. return rc;
  301. }
  302. static int ipv6_mask_len(struct in6_addr *mask)
  303. {
  304. int mask_len = 0, i;
  305. for (i = 0; i < 4; i++)
  306. mask_len += inet_mask_len(mask->s6_addr32[i]);
  307. return mask_len;
  308. }
  309. static bool is_wildcard(void *mask, int len)
  310. {
  311. const u8 *p = mask;
  312. int i;
  313. for (i = 0; i < len; i++) {
  314. if (p[i] != 0)
  315. return false;
  316. }
  317. return true;
  318. }
  319. static bool is_exactmatch(void *mask, int len)
  320. {
  321. const u8 *p = mask;
  322. int i;
  323. for (i = 0; i < len; i++)
  324. if (p[i] != 0xff)
  325. return false;
  326. return true;
  327. }
  328. static bool bits_set(void *key, int len)
  329. {
  330. const u8 *p = key;
  331. int i;
  332. for (i = 0; i < len; i++)
  333. if (p[i] != 0)
  334. return true;
  335. return false;
  336. }
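/* Worked examples (illustrative): a MAC mask of ff:ff:ff:ff:ff:ff
 * satisfies is_exactmatch() and not is_wildcard(); 00:00:00:00:00:00
 * is the reverse; a partial mask such as ff:ff:ff:00:00:00 fails both,
 * which is why bnxt_tc_can_offload() below rejects partially
 * wildcarded MACs. Likewise ipv6_mask_len() of ffff:ffff:ffff:ffff::
 * is 32 + 32 + 0 + 0 = 64, the sum of the per-word prefix lengths
 * returned by inet_mask_len().
 */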
  337. static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
  338. __le16 ref_flow_handle,
  339. __le32 tunnel_handle, __le16 *flow_handle)
  340. {
  341. struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  342. struct bnxt_tc_actions *actions = &flow->actions;
  343. struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
  344. struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
  345. struct hwrm_cfa_flow_alloc_input req = { 0 };
  346. u16 flow_flags = 0, action_flags = 0;
  347. int rc;
  348. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
  349. req.src_fid = cpu_to_le16(flow->src_fid);
  350. req.ref_flow_handle = ref_flow_handle;
  351. if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
  352. actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
  353. req.tunnel_handle = tunnel_handle;
  354. flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
  355. action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
  356. }
  357. req.ethertype = flow->l2_key.ether_type;
  358. req.ip_proto = flow->l4_key.ip_proto;
  359. if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
  360. memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
  361. memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
  362. }
  363. if (flow->l2_key.num_vlans > 0) {
  364. flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
  365. /* FW expects the inner_vlan_tci value to be set
  366. * in outer_vlan_tci when num_vlans is 1 (which is
  367. * always the case in TC.)
  368. */
  369. req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
  370. }
  371. /* If all IP and L4 fields are wildcarded then this is an L2 flow */
  372. if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
  373. is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
  374. flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
  375. } else {
  376. flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
  377. CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
  378. CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
  379. if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
  380. req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
  381. req.ip_dst_mask_len =
  382. inet_mask_len(l3_mask->ipv4.daddr.s_addr);
  383. req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
  384. req.ip_src_mask_len =
  385. inet_mask_len(l3_mask->ipv4.saddr.s_addr);
  386. } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
  387. memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
  388. sizeof(req.ip_dst));
  389. req.ip_dst_mask_len =
  390. ipv6_mask_len(&l3_mask->ipv6.daddr);
  391. memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
  392. sizeof(req.ip_src));
  393. req.ip_src_mask_len =
  394. ipv6_mask_len(&l3_mask->ipv6.saddr);
  395. }
  396. }
  397. if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
  398. req.l4_src_port = flow->l4_key.ports.sport;
  399. req.l4_src_port_mask = flow->l4_mask.ports.sport;
  400. req.l4_dst_port = flow->l4_key.ports.dport;
  401. req.l4_dst_port_mask = flow->l4_mask.ports.dport;
  402. } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
  403. /* l4 ports serve as type/code when ip_proto is ICMP */
  404. req.l4_src_port = htons(flow->l4_key.icmp.type);
  405. req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
  406. req.l4_dst_port = htons(flow->l4_key.icmp.code);
  407. req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
  408. }
  409. req.flags = cpu_to_le16(flow_flags);
  410. if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
  411. action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
  412. } else {
  413. if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
  414. action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
  415. req.dst_fid = cpu_to_le16(actions->dst_fid);
  416. }
  417. if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
  418. action_flags |=
  419. CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
  420. req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
  421. req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
  422. memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
  423. memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
  424. }
  425. if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
  426. action_flags |=
  427. CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
  428. /* Rewrite config with tpid = 0 implies vlan pop */
  429. req.l2_rewrite_vlan_tpid = 0;
  430. memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
  431. memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
  432. }
  433. }
  434. req.action_flags = cpu_to_le16(action_flags);
  435. mutex_lock(&bp->hwrm_cmd_lock);
  436. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  437. if (!rc)
  438. *flow_handle = resp->flow_handle;
  439. mutex_unlock(&bp->hwrm_cmd_lock);
  440. if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
  441. rc = -ENOSPC;
  442. else if (rc)
  443. rc = -EIO;
  444. return rc;
  445. }
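/* Illustrative example (not part of the driver): for an exact-match
 * ICMP echo-request rule (type 8, code 0), the request built above
 * carries l4_src_port = htons(8), l4_dst_port = htons(0) and both
 * port masks = htons(0xff), since the L4 port fields double as the
 * ICMP type/code when ip_proto is ICMP.
 */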
  446. static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
  447. struct bnxt_tc_flow *flow,
  448. struct bnxt_tc_l2_key *l2_info,
  449. __le32 ref_decap_handle,
  450. __le32 *decap_filter_handle)
  451. {
  452. struct hwrm_cfa_decap_filter_alloc_output *resp =
  453. bp->hwrm_cmd_resp_addr;
  454. struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
  455. struct ip_tunnel_key *tun_key = &flow->tun_key;
  456. u32 enables = 0;
  457. int rc;
  458. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
  459. req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
  460. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
  461. CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
  462. req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
  463. req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
  464. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
  465. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
  466. /* tunnel_id is wrongly defined in hsi defn. as __le32 */
  467. req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
  468. }
  469. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
  470. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
  471. ether_addr_copy(req.dst_macaddr, l2_info->dmac);
  472. }
  473. if (l2_info->num_vlans) {
  474. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
  475. req.t_ivlan_vid = l2_info->inner_vlan_tci;
  476. }
  477. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
  478. req.ethertype = htons(ETH_P_IP);
  479. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
  480. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
  481. CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
  482. CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
  483. req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
  484. req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
  485. req.src_ipaddr[0] = tun_key->u.ipv4.src;
  486. }
  487. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
  488. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
  489. req.dst_port = tun_key->tp_dst;
  490. }
  491. /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
  492. * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
  493. */
  494. req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
  495. req.enables = cpu_to_le32(enables);
  496. mutex_lock(&bp->hwrm_cmd_lock);
  497. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  498. if (!rc)
  499. *decap_filter_handle = resp->decap_filter_id;
  500. else
  501. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  502. mutex_unlock(&bp->hwrm_cmd_lock);
  503. if (rc)
  504. rc = -EIO;
  505. return rc;
  506. }
  507. static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
  508. __le32 decap_filter_handle)
  509. {
  510. struct hwrm_cfa_decap_filter_free_input req = { 0 };
  511. int rc;
  512. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
  513. req.decap_filter_id = decap_filter_handle;
  514. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  515. if (rc)
  516. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  517. if (rc)
  518. rc = -EIO;
  519. return rc;
  520. }
  521. static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
  522. struct ip_tunnel_key *encap_key,
  523. struct bnxt_tc_l2_key *l2_info,
  524. __le32 *encap_record_handle)
  525. {
  526. struct hwrm_cfa_encap_record_alloc_output *resp =
  527. bp->hwrm_cmd_resp_addr;
  528. struct hwrm_cfa_encap_record_alloc_input req = { 0 };
  529. struct hwrm_cfa_encap_data_vxlan *encap =
  530. (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
  531. struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
  532. (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
  533. int rc;
  534. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
  535. req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
  536. ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
  537. ether_addr_copy(encap->src_mac_addr, l2_info->smac);
  538. if (l2_info->num_vlans) {
  539. encap->num_vlan_tags = l2_info->num_vlans;
  540. encap->ovlan_tci = l2_info->inner_vlan_tci;
  541. encap->ovlan_tpid = l2_info->inner_vlan_tpid;
  542. }
  543. encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
  544. encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
  545. encap_ipv4->ttl = encap_key->ttl;
  546. encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
  547. encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
  548. encap_ipv4->protocol = IPPROTO_UDP;
  549. encap->dst_port = encap_key->tp_dst;
  550. encap->vni = tunnel_id_to_key32(encap_key->tun_id);
  551. mutex_lock(&bp->hwrm_cmd_lock);
  552. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  553. if (!rc)
  554. *encap_record_handle = resp->encap_record_id;
  555. else
  556. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  557. mutex_unlock(&bp->hwrm_cmd_lock);
  558. if (rc)
  559. rc = -EIO;
  560. return rc;
  561. }
  562. static int hwrm_cfa_encap_record_free(struct bnxt *bp,
  563. __le32 encap_record_handle)
  564. {
  565. struct hwrm_cfa_encap_record_free_input req = { 0 };
  566. int rc;
  567. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
  568. req.encap_record_id = encap_record_handle;
  569. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  570. if (rc)
  571. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  572. if (rc)
  573. rc = -EIO;
  574. return rc;
  575. }
  576. static int bnxt_tc_put_l2_node(struct bnxt *bp,
  577. struct bnxt_tc_flow_node *flow_node)
  578. {
  579. struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
  580. struct bnxt_tc_info *tc_info = bp->tc_info;
  581. int rc;
  582. /* remove flow_node from the L2 shared flow list */
  583. list_del(&flow_node->l2_list_node);
  584. if (--l2_node->refcount == 0) {
  585. rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
  586. tc_info->l2_ht_params);
  587. if (rc)
  588. netdev_err(bp->dev,
  589. "Error: %s: rhashtable_remove_fast: %d",
  590. __func__, rc);
  591. kfree_rcu(l2_node, rcu);
  592. }
  593. return 0;
  594. }
  595. static struct bnxt_tc_l2_node *
  596. bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
  597. struct rhashtable_params ht_params,
  598. struct bnxt_tc_l2_key *l2_key)
  599. {
  600. struct bnxt_tc_l2_node *l2_node;
  601. int rc;
  602. l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
  603. if (!l2_node) {
  604. l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
  605. if (!l2_node) {
  606. rc = -ENOMEM;
  607. return NULL;
  608. }
  609. l2_node->key = *l2_key;
  610. rc = rhashtable_insert_fast(l2_table, &l2_node->node,
  611. ht_params);
  612. if (rc) {
  613. kfree_rcu(l2_node, rcu);
  614. netdev_err(bp->dev,
  615. "Error: %s: rhashtable_insert_fast: %d",
  616. __func__, rc);
  617. return NULL;
  618. }
  619. INIT_LIST_HEAD(&l2_node->common_l2_flows);
  620. }
  621. return l2_node;
  622. }
  623. /* Get the ref_flow_handle for a flow by checking if there are any other
  624. * flows that share the same L2 key as this flow.
  625. */
  626. static int
  627. bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
  628. struct bnxt_tc_flow_node *flow_node,
  629. __le16 *ref_flow_handle)
  630. {
  631. struct bnxt_tc_info *tc_info = bp->tc_info;
  632. struct bnxt_tc_flow_node *ref_flow_node;
  633. struct bnxt_tc_l2_node *l2_node;
  634. l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
  635. tc_info->l2_ht_params,
  636. &flow->l2_key);
  637. if (!l2_node)
  638. return -1;
  639. /* If any other flow is using this l2_node, use its flow_handle
  640. * as the ref_flow_handle
  641. */
  642. if (l2_node->refcount > 0) {
  643. ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
  644. struct bnxt_tc_flow_node,
  645. l2_list_node);
  646. *ref_flow_handle = ref_flow_node->flow_handle;
  647. } else {
  648. *ref_flow_handle = cpu_to_le16(0xffff);
  649. }
  650. /* Insert the l2_node into the flow_node so that subsequent flows
  651. * with a matching l2 key can use the flow_handle of this flow
  652. * as their ref_flow_handle
  653. */
  654. flow_node->l2_node = l2_node;
  655. list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
  656. l2_node->refcount++;
  657. return 0;
  658. }
  659. /* After the flow parsing is done, this routine is used for checking
  660. * if there are any aspects of the flow that prevent it from being
  661. * offloaded.
  662. */
  663. static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
  664. {
  665. /* If L4 ports are specified then ip_proto must be TCP or UDP */
  666. if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
  667. (flow->l4_key.ip_proto != IPPROTO_TCP &&
  668. flow->l4_key.ip_proto != IPPROTO_UDP)) {
  669. netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
  670. flow->l4_key.ip_proto);
  671. return false;
  672. }
  673. /* Currently source/dest MAC cannot be partial wildcard */
  674. if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
  675. !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
  676. netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
  677. return false;
  678. }
  679. if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
  680. !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
  681. netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
  682. return false;
  683. }
  684. /* Currently VLAN fields cannot be partial wildcard */
  685. if (bits_set(&flow->l2_key.inner_vlan_tci,
  686. sizeof(flow->l2_key.inner_vlan_tci)) &&
  687. !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
  688. sizeof(flow->l2_mask.inner_vlan_tci))) {
  689. netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
  690. return false;
  691. }
  692. if (bits_set(&flow->l2_key.inner_vlan_tpid,
  693. sizeof(flow->l2_key.inner_vlan_tpid)) &&
  694. !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
  695. sizeof(flow->l2_mask.inner_vlan_tpid))) {
  696. netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
  697. return false;
  698. }
  699. /* Currently Ethertype must be set */
  700. if (!is_exactmatch(&flow->l2_mask.ether_type,
  701. sizeof(flow->l2_mask.ether_type))) {
  702. netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
  703. return false;
  704. }
  705. return true;
  706. }
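/* Illustrative example (not part of the driver; interface name is a
 * placeholder): a rule such as
 *
 *   tc filter add dev pf0vf0 ingress flower \
 *       src_mac 52:54:00:aa:bb:cc/ff:ff:ff:00:00:00 action drop
 *
 * sets source-MAC bits but only a partial mask, so the check above
 * logs "Wildcard match unsupported for Source MAC" and the flow is
 * not offloaded to hardware.
 */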
  707. /* Returns the final refcount of the node on success
  708. * or a -ve error code on failure
  709. */
  710. static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
  711. struct rhashtable *tunnel_table,
  712. struct rhashtable_params *ht_params,
  713. struct bnxt_tc_tunnel_node *tunnel_node)
  714. {
  715. int rc;
  716. if (--tunnel_node->refcount == 0) {
  717. rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
  718. *ht_params);
  719. if (rc) {
  720. netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
  721. rc = -1;
  722. }
  723. kfree_rcu(tunnel_node, rcu);
  724. return rc;
  725. } else {
  726. return tunnel_node->refcount;
  727. }
  728. }
  729. /* Get (or add) either encap or decap tunnel node from/to the supplied
  730. * hash table.
  731. */
  732. static struct bnxt_tc_tunnel_node *
  733. bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
  734. struct rhashtable_params *ht_params,
  735. struct ip_tunnel_key *tun_key)
  736. {
  737. struct bnxt_tc_tunnel_node *tunnel_node;
  738. int rc;
  739. tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
  740. if (!tunnel_node) {
  741. tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
  742. if (!tunnel_node) {
  743. rc = -ENOMEM;
  744. goto err;
  745. }
  746. tunnel_node->key = *tun_key;
  747. tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
  748. rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
  749. *ht_params);
  750. if (rc) {
  751. kfree_rcu(tunnel_node, rcu);
  752. goto err;
  753. }
  754. }
  755. tunnel_node->refcount++;
  756. return tunnel_node;
  757. err:
  758. netdev_info(bp->dev, "error rc=%d", rc);
  759. return NULL;
  760. }
  761. static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
  762. struct bnxt_tc_flow *flow,
  763. struct bnxt_tc_l2_key *l2_key,
  764. struct bnxt_tc_flow_node *flow_node,
  765. __le32 *ref_decap_handle)
  766. {
  767. struct bnxt_tc_info *tc_info = bp->tc_info;
  768. struct bnxt_tc_flow_node *ref_flow_node;
  769. struct bnxt_tc_l2_node *decap_l2_node;
  770. decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
  771. tc_info->decap_l2_ht_params,
  772. l2_key);
  773. if (!decap_l2_node)
  774. return -1;
  775. /* If any other flow is using this decap_l2_node, use its decap_handle
  776. * as the ref_decap_handle
  777. */
  778. if (decap_l2_node->refcount > 0) {
  779. ref_flow_node =
  780. list_first_entry(&decap_l2_node->common_l2_flows,
  781. struct bnxt_tc_flow_node,
  782. decap_l2_list_node);
  783. *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
  784. } else {
  785. *ref_decap_handle = INVALID_TUNNEL_HANDLE;
  786. }
  787. /* Insert the l2_node into the flow_node so that subsequent flows
  788. * with a matching decap l2 key can use the decap_filter_handle of
  789. * this flow as their ref_decap_handle
  790. */
  791. flow_node->decap_l2_node = decap_l2_node;
  792. list_add(&flow_node->decap_l2_list_node,
  793. &decap_l2_node->common_l2_flows);
  794. decap_l2_node->refcount++;
  795. return 0;
  796. }
  797. static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
  798. struct bnxt_tc_flow_node *flow_node)
  799. {
  800. struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
  801. struct bnxt_tc_info *tc_info = bp->tc_info;
  802. int rc;
  803. /* remove flow_node from the decap L2 sharing flow list */
  804. list_del(&flow_node->decap_l2_list_node);
  805. if (--decap_l2_node->refcount == 0) {
  806. rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
  807. &decap_l2_node->node,
  808. tc_info->decap_l2_ht_params);
  809. if (rc)
  810. netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
  811. kfree_rcu(decap_l2_node, rcu);
  812. }
  813. }
  814. static void bnxt_tc_put_decap_handle(struct bnxt *bp,
  815. struct bnxt_tc_flow_node *flow_node)
  816. {
  817. __le32 decap_handle = flow_node->decap_node->tunnel_handle;
  818. struct bnxt_tc_info *tc_info = bp->tc_info;
  819. int rc;
  820. if (flow_node->decap_l2_node)
  821. bnxt_tc_put_decap_l2_node(bp, flow_node);
  822. rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
  823. &tc_info->decap_ht_params,
  824. flow_node->decap_node);
  825. if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
  826. hwrm_cfa_decap_filter_free(bp, decap_handle);
  827. }
  828. static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
  829. struct ip_tunnel_key *tun_key,
  830. struct bnxt_tc_l2_key *l2_info)
  831. {
  832. #ifdef CONFIG_INET
  833. struct net_device *real_dst_dev = bp->dev;
  834. struct flowi4 flow = { {0} };
  835. struct net_device *dst_dev;
  836. struct neighbour *nbr;
  837. struct rtable *rt;
  838. int rc;
  839. flow.flowi4_proto = IPPROTO_UDP;
  840. flow.fl4_dport = tun_key->tp_dst;
  841. flow.daddr = tun_key->u.ipv4.dst;
  842. rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
  843. if (IS_ERR(rt)) {
  844. netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
  845. return -EOPNOTSUPP;
  846. }
  847. /* The route must either point to the real_dst_dev or a dst_dev that
  848. * uses the real_dst_dev.
  849. */
  850. dst_dev = rt->dst.dev;
  851. if (is_vlan_dev(dst_dev)) {
  852. #if IS_ENABLED(CONFIG_VLAN_8021Q)
  853. struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
  854. if (vlan->real_dev != real_dst_dev) {
  855. netdev_info(bp->dev,
  856. "dst_dev(%s) doesn't use PF-if(%s)",
  857. netdev_name(dst_dev),
  858. netdev_name(real_dst_dev));
  859. rc = -EOPNOTSUPP;
  860. goto put_rt;
  861. }
  862. l2_info->inner_vlan_tci = htons(vlan->vlan_id);
  863. l2_info->inner_vlan_tpid = vlan->vlan_proto;
  864. l2_info->num_vlans = 1;
  865. #endif
  866. } else if (dst_dev != real_dst_dev) {
  867. netdev_info(bp->dev,
  868. "dst_dev(%s) for %pI4b is not PF-if(%s)",
  869. netdev_name(dst_dev), &flow.daddr,
  870. netdev_name(real_dst_dev));
  871. rc = -EOPNOTSUPP;
  872. goto put_rt;
  873. }
  874. nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
  875. if (!nbr) {
  876. netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
  877. &flow.daddr);
  878. rc = -EOPNOTSUPP;
  879. goto put_rt;
  880. }
  881. tun_key->u.ipv4.src = flow.saddr;
  882. tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
  883. neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
  884. ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
  885. neigh_release(nbr);
  886. ip_rt_put(rt);
  887. return 0;
  888. put_rt:
  889. ip_rt_put(rt);
  890. return rc;
  891. #else
  892. return -EOPNOTSUPP;
  893. #endif
  894. }
  895. static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
  896. struct bnxt_tc_flow_node *flow_node,
  897. __le32 *decap_filter_handle)
  898. {
  899. struct ip_tunnel_key *decap_key = &flow->tun_key;
  900. struct bnxt_tc_info *tc_info = bp->tc_info;
  901. struct bnxt_tc_l2_key l2_info = { {0} };
  902. struct bnxt_tc_tunnel_node *decap_node;
  903. struct ip_tunnel_key tun_key = { 0 };
  904. struct bnxt_tc_l2_key *decap_l2_info;
  905. __le32 ref_decap_handle;
  906. int rc;
  907. /* Check if there's another flow using the same tunnel decap.
  908. * If not, add this tunnel to the table and resolve the other
  909. * tunnel header fields. Ignore src_port in the tunnel_key,
  910. * since it is not required for decap filters.
  911. */
  912. decap_key->tp_src = 0;
  913. decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
  914. &tc_info->decap_ht_params,
  915. decap_key);
  916. if (!decap_node)
  917. return -ENOMEM;
  918. flow_node->decap_node = decap_node;
  919. if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
  920. goto done;
  921. /* Resolve the L2 fields for tunnel decap
  922. * Resolve the route for remote vtep (saddr) of the decap key
  923. * Find its next-hop MAC address
  924. */
  925. tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
  926. tun_key.tp_dst = flow->tun_key.tp_dst;
  927. rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
  928. if (rc)
  929. goto put_decap;
  930. decap_l2_info = &decap_node->l2_info;
  931. /* decap smac is wildcarded */
  932. ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
  933. if (l2_info.num_vlans) {
  934. decap_l2_info->num_vlans = l2_info.num_vlans;
  935. decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
  936. decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
  937. }
  938. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;
  939. /* For getting a decap_filter_handle we first need to check if
  940. * there are any other decap flows that share the same tunnel L2
  941. * key and if so, pass that flow's decap_filter_handle as the
  942. * ref_decap_handle for this flow.
  943. */
  944. rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
  945. &ref_decap_handle);
  946. if (rc)
  947. goto put_decap;
  948. /* Issue the hwrm cmd to allocate a decap filter handle */
  949. rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
  950. ref_decap_handle,
  951. &decap_node->tunnel_handle);
  952. if (rc)
  953. goto put_decap_l2;
  954. done:
  955. *decap_filter_handle = decap_node->tunnel_handle;
  956. return 0;
  957. put_decap_l2:
  958. bnxt_tc_put_decap_l2_node(bp, flow_node);
  959. put_decap:
  960. bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
  961. &tc_info->decap_ht_params,
  962. flow_node->decap_node);
  963. return rc;
  964. }
  965. static void bnxt_tc_put_encap_handle(struct bnxt *bp,
  966. struct bnxt_tc_tunnel_node *encap_node)
  967. {
  968. __le32 encap_handle = encap_node->tunnel_handle;
  969. struct bnxt_tc_info *tc_info = bp->tc_info;
  970. int rc;
  971. rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
  972. &tc_info->encap_ht_params, encap_node);
  973. if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
  974. hwrm_cfa_encap_record_free(bp, encap_handle);
  975. }
  976. /* Lookup the tunnel encap table and check if there's an encap_handle
  977. * alloc'd already.
  978. * If not, query L2 info via a route lookup and issue an encap_record_alloc
  979. * cmd to FW.
  980. */
  981. static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
  982. struct bnxt_tc_flow_node *flow_node,
  983. __le32 *encap_handle)
  984. {
  985. struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
  986. struct bnxt_tc_info *tc_info = bp->tc_info;
  987. struct bnxt_tc_tunnel_node *encap_node;
  988. int rc;
  989. /* Check if there's another flow using the same tunnel encap.
  990. * If not, add this tunnel to the table and resolve the other
  991. * tunnel header fields
  992. */
  993. encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
  994. &tc_info->encap_ht_params,
  995. encap_key);
  996. if (!encap_node)
  997. return -ENOMEM;
  998. flow_node->encap_node = encap_node;
  999. if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
  1000. goto done;
  1001. rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
  1002. if (rc)
  1003. goto put_encap;
  1004. /* Allocate a new tunnel encap record */
  1005. rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
  1006. &encap_node->tunnel_handle);
  1007. if (rc)
  1008. goto put_encap;
  1009. done:
  1010. *encap_handle = encap_node->tunnel_handle;
  1011. return 0;
  1012. put_encap:
  1013. bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
  1014. &tc_info->encap_ht_params, encap_node);
  1015. return rc;
  1016. }
  1017. static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
  1018. struct bnxt_tc_flow *flow,
  1019. struct bnxt_tc_flow_node *flow_node)
  1020. {
  1021. if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
  1022. bnxt_tc_put_decap_handle(bp, flow_node);
  1023. else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
  1024. bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
  1025. }
  1026. static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
  1027. struct bnxt_tc_flow *flow,
  1028. struct bnxt_tc_flow_node *flow_node,
  1029. __le32 *tunnel_handle)
  1030. {
  1031. if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
  1032. return bnxt_tc_get_decap_handle(bp, flow, flow_node,
  1033. tunnel_handle);
  1034. else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
  1035. return bnxt_tc_get_encap_handle(bp, flow, flow_node,
  1036. tunnel_handle);
  1037. else
  1038. return 0;
  1039. }
  1040. static int __bnxt_tc_del_flow(struct bnxt *bp,
  1041. struct bnxt_tc_flow_node *flow_node)
  1042. {
  1043. struct bnxt_tc_info *tc_info = bp->tc_info;
  1044. int rc;
  1045. /* send HWRM cmd to free the flow-id */
  1046. bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
  1047. mutex_lock(&tc_info->lock);
  1048. /* release references to any tunnel encap/decap nodes */
  1049. bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);
  1050. /* release reference to l2 node */
  1051. bnxt_tc_put_l2_node(bp, flow_node);
  1052. mutex_unlock(&tc_info->lock);
  1053. rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
  1054. tc_info->flow_ht_params);
  1055. if (rc)
  1056. netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
  1057. __func__, rc);
  1058. kfree_rcu(flow_node, rcu);
  1059. return 0;
  1060. }
  1061. static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
  1062. u16 src_fid)
  1063. {
  1064. if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
  1065. flow->src_fid = bp->pf.fw_fid;
  1066. else
  1067. flow->src_fid = src_fid;
  1068. }
  1069. /* Add a new flow or replace an existing flow.
  1070. * Notes on locking:
  1071. * There are essentially two critical sections here.
  1072. * 1. while adding a new flow
  1073. * a) lookup l2-key
  1074. * b) issue HWRM cmd and get flow_handle
  1075. * c) link l2-key with flow
  1076. * 2. while deleting a flow
  1077. * a) unlinking l2-key from flow
  1078. * A lock is needed to protect these two critical sections.
  1079. *
  1080. * The hash-tables are already protected by the rhashtable API.
  1081. */
  1082. static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
  1083. struct tc_cls_flower_offload *tc_flow_cmd)
  1084. {
  1085. struct bnxt_tc_flow_node *new_node, *old_node;
  1086. struct bnxt_tc_info *tc_info = bp->tc_info;
  1087. struct bnxt_tc_flow *flow;
  1088. __le32 tunnel_handle = 0;
  1089. __le16 ref_flow_handle;
  1090. int rc;
  1091. /* allocate memory for the new flow and its node */
  1092. new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
  1093. if (!new_node) {
  1094. rc = -ENOMEM;
  1095. goto done;
  1096. }
  1097. new_node->cookie = tc_flow_cmd->cookie;
  1098. flow = &new_node->flow;
  1099. rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
  1100. if (rc)
  1101. goto free_node;
  1102. bnxt_tc_set_src_fid(bp, flow, src_fid);
  1103. if (!bnxt_tc_can_offload(bp, flow)) {
  1104. rc = -ENOSPC;
  1105. goto free_node;
  1106. }
  1107. /* If a flow exists with the same cookie, delete it */
  1108. old_node = rhashtable_lookup_fast(&tc_info->flow_table,
  1109. &tc_flow_cmd->cookie,
  1110. tc_info->flow_ht_params);
  1111. if (old_node)
  1112. __bnxt_tc_del_flow(bp, old_node);
  1113. /* Check if the L2 part of the flow has been offloaded already.
  1114. * If so, bump up its refcnt and get its reference handle.
  1115. */
  1116. mutex_lock(&tc_info->lock);
  1117. rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
  1118. if (rc)
  1119. goto unlock;
  1120. /* If the flow involves tunnel encap/decap, get tunnel_handle */
  1121. rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
  1122. if (rc)
  1123. goto put_l2;
  1124. /* send HWRM cmd to alloc the flow */
  1125. rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
  1126. tunnel_handle, &new_node->flow_handle);
  1127. if (rc)
  1128. goto put_tunnel;
  1129. flow->lastused = jiffies;
  1130. spin_lock_init(&flow->stats_lock);
  1131. /* add new flow to flow-table */
  1132. rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
  1133. tc_info->flow_ht_params);
  1134. if (rc)
  1135. goto hwrm_flow_free;
  1136. mutex_unlock(&tc_info->lock);
  1137. return 0;
  1138. hwrm_flow_free:
  1139. bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
  1140. put_tunnel:
  1141. bnxt_tc_put_tunnel_handle(bp, flow, new_node);
  1142. put_l2:
  1143. bnxt_tc_put_l2_node(bp, new_node);
  1144. unlock:
  1145. mutex_unlock(&tc_info->lock);
  1146. free_node:
  1147. kfree_rcu(new_node, rcu);
  1148. done:
  1149. netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
  1150. __func__, tc_flow_cmd->cookie, rc);
  1151. return rc;
  1152. }
  1153. static int bnxt_tc_del_flow(struct bnxt *bp,
  1154. struct tc_cls_flower_offload *tc_flow_cmd)
  1155. {
  1156. struct bnxt_tc_info *tc_info = bp->tc_info;
  1157. struct bnxt_tc_flow_node *flow_node;
  1158. flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
  1159. &tc_flow_cmd->cookie,
  1160. tc_info->flow_ht_params);
  1161. if (!flow_node)
  1162. return -EINVAL;
  1163. return __bnxt_tc_del_flow(bp, flow_node);
  1164. }
  1165. static int bnxt_tc_get_flow_stats(struct bnxt *bp,
  1166. struct tc_cls_flower_offload *tc_flow_cmd)
  1167. {
  1168. struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
  1169. struct bnxt_tc_info *tc_info = bp->tc_info;
  1170. struct bnxt_tc_flow_node *flow_node;
  1171. struct bnxt_tc_flow *flow;
  1172. unsigned long lastused;
  1173. flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
  1174. &tc_flow_cmd->cookie,
  1175. tc_info->flow_ht_params);
  1176. if (!flow_node)
  1177. return -1;
  1178. flow = &flow_node->flow;
  1179. curr_stats = &flow->stats;
  1180. prev_stats = &flow->prev_stats;
  1181. spin_lock(&flow->stats_lock);
  1182. stats.packets = curr_stats->packets - prev_stats->packets;
  1183. stats.bytes = curr_stats->bytes - prev_stats->bytes;
  1184. *prev_stats = *curr_stats;
  1185. lastused = flow->lastused;
  1186. spin_unlock(&flow->stats_lock);
  1187. tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
  1188. lastused);
  1189. return 0;
  1190. }
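/* Worked example (illustrative): if the accumulated counters read
 * 1000 packets / 64000 bytes and the previous snapshot was
 * 900 / 57600, the code above reports a delta of 100 packets /
 * 6400 bytes via tcf_exts_stats_update() and then advances
 * prev_stats, so the next query reports only the traffic seen since
 * this one.
 */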
  1191. static int
  1192. bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
  1193. struct bnxt_tc_stats_batch stats_batch[])
  1194. {
  1195. struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
  1196. struct hwrm_cfa_flow_stats_input req = { 0 };
  1197. __le16 *req_flow_handles = &req.flow_handle_0;
  1198. int rc, i;
  1199. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
  1200. req.num_flows = cpu_to_le16(num_flows);
  1201. for (i = 0; i < num_flows; i++) {
  1202. struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
  1203. req_flow_handles[i] = flow_node->flow_handle;
  1204. }
  1205. mutex_lock(&bp->hwrm_cmd_lock);
  1206. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  1207. if (!rc) {
  1208. __le64 *resp_packets = &resp->packet_0;
  1209. __le64 *resp_bytes = &resp->byte_0;
  1210. for (i = 0; i < num_flows; i++) {
  1211. stats_batch[i].hw_stats.packets =
  1212. le64_to_cpu(resp_packets[i]);
  1213. stats_batch[i].hw_stats.bytes =
  1214. le64_to_cpu(resp_bytes[i]);
  1215. }
  1216. } else {
  1217. netdev_info(bp->dev, "error rc=%d", rc);
  1218. }
  1219. mutex_unlock(&bp->hwrm_cmd_lock);
  1220. if (rc)
  1221. rc = -EIO;
  1222. return rc;
  1223. }
  1224. /* Add val to accum while handling a possible wraparound
  1225. * of val. Even though val is of type u64, its actual width
  1226. * is denoted by mask and will wrap-around beyond that width.
  1227. */
  1228. static void accumulate_val(u64 *accum, u64 val, u64 mask)
  1229. {
  1230. #define low_bits(x, mask) ((x) & (mask))
  1231. #define high_bits(x, mask) ((x) & ~(mask))
  1232. bool wrapped = val < low_bits(*accum, mask);
  1233. *accum = high_bits(*accum, mask) + val;
  1234. if (wrapped)
  1235. *accum += (mask + 1);
  1236. }
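/* Worked example (illustrative): with a 28-bit counter
 * (mask = 0x0fffffff), an accumulator of 0x0ffffff0 and a new
 * hardware reading of 0x10 means the counter wrapped, since
 * val (0x10) is below the accumulator's low bits (0x0ffffff0).
 * The accumulator becomes 0x10 + (mask + 1) = 0x10000010, i.e. it
 * advances by 0x20, exactly the number of events counted across
 * the wrap.
 */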
  1237. /* The HW counters' width is much less than 64bits.
  1238. * Handle possible wrap-around while updating the stat counters
  1239. */
  1240. static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
  1241. struct bnxt_tc_flow_stats *acc_stats,
  1242. struct bnxt_tc_flow_stats *hw_stats)
  1243. {
  1244. accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
  1245. accumulate_val(&acc_stats->packets, hw_stats->packets,
  1246. tc_info->packets_mask);
  1247. }
  1248. static int
  1249. bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
  1250. struct bnxt_tc_stats_batch stats_batch[])
  1251. {
  1252. struct bnxt_tc_info *tc_info = bp->tc_info;
  1253. int rc, i;
  1254. rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
  1255. if (rc)
  1256. return rc;
  1257. for (i = 0; i < num_flows; i++) {
  1258. struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
  1259. struct bnxt_tc_flow *flow = &flow_node->flow;
  1260. spin_lock(&flow->stats_lock);
  1261. bnxt_flow_stats_accum(tc_info, &flow->stats,
  1262. &stats_batch[i].hw_stats);
  1263. if (flow->stats.packets != flow->prev_stats.packets)
  1264. flow->lastused = jiffies;
  1265. spin_unlock(&flow->stats_lock);
  1266. }
  1267. return 0;
  1268. }
  1269. static int
  1270. bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
  1271. struct bnxt_tc_stats_batch stats_batch[],
  1272. int *num_flows)
  1273. {
  1274. struct bnxt_tc_info *tc_info = bp->tc_info;
  1275. struct rhashtable_iter *iter = &tc_info->iter;
  1276. void *flow_node;
  1277. int rc, i;
  1278. rhashtable_walk_start(iter);
  1279. rc = 0;
  1280. for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
  1281. flow_node = rhashtable_walk_next(iter);
  1282. if (IS_ERR(flow_node)) {
  1283. i = 0;
  1284. if (PTR_ERR(flow_node) == -EAGAIN) {
  1285. continue;
  1286. } else {
  1287. rc = PTR_ERR(flow_node);
  1288. goto done;
  1289. }
  1290. }
  1291. /* No more flows */
  1292. if (!flow_node)
  1293. goto done;
  1294. stats_batch[i].flow_node = flow_node;
  1295. }
  1296. done:
  1297. rhashtable_walk_stop(iter);
  1298. *num_flows = i;
  1299. return rc;
  1300. }
  1301. void bnxt_tc_flow_stats_work(struct bnxt *bp)
  1302. {
  1303. struct bnxt_tc_info *tc_info = bp->tc_info;
  1304. int num_flows, rc;
  1305. num_flows = atomic_read(&tc_info->flow_table.nelems);
  1306. if (!num_flows)
  1307. return;
  1308. rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
  1309. for (;;) {
  1310. rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
  1311. &num_flows);
  1312. if (rc) {
  1313. if (rc == -EAGAIN)
  1314. continue;
  1315. break;
  1316. }
  1317. if (!num_flows)
  1318. break;
  1319. bnxt_tc_flow_stats_batch_update(bp, num_flows,
  1320. tc_info->stats_batch);
  1321. }
  1322. rhashtable_walk_exit(&tc_info->iter);
  1323. }
  1324. int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
  1325. struct tc_cls_flower_offload *cls_flower)
  1326. {
  1327. int rc = 0;
  1328. switch (cls_flower->command) {
  1329. case TC_CLSFLOWER_REPLACE:
  1330. rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
  1331. break;
  1332. case TC_CLSFLOWER_DESTROY:
  1333. rc = bnxt_tc_del_flow(bp, cls_flower);
  1334. break;
  1335. case TC_CLSFLOWER_STATS:
  1336. rc = bnxt_tc_get_flow_stats(bp, cls_flower);
  1337. break;
  1338. }
  1339. return rc;
  1340. }
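/* Illustrative mapping (not part of the driver; interface name is a
 * placeholder): TC_CLSFLOWER_REPLACE and TC_CLSFLOWER_DESTROY
 * correspond to adding and deleting a flower filter, while a stats
 * dump such as "tc -s filter show dev pf0vf0 ingress" reaches
 * bnxt_tc_get_flow_stats() via TC_CLSFLOWER_STATS.
 */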
  1341. static const struct rhashtable_params bnxt_tc_flow_ht_params = {
  1342. .head_offset = offsetof(struct bnxt_tc_flow_node, node),
  1343. .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
  1344. .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
  1345. .automatic_shrinking = true
  1346. };
  1347. static const struct rhashtable_params bnxt_tc_l2_ht_params = {
  1348. .head_offset = offsetof(struct bnxt_tc_l2_node, node),
  1349. .key_offset = offsetof(struct bnxt_tc_l2_node, key),
  1350. .key_len = BNXT_TC_L2_KEY_LEN,
  1351. .automatic_shrinking = true
  1352. };
  1353. static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
  1354. .head_offset = offsetof(struct bnxt_tc_l2_node, node),
  1355. .key_offset = offsetof(struct bnxt_tc_l2_node, key),
  1356. .key_len = BNXT_TC_L2_KEY_LEN,
  1357. .automatic_shrinking = true
  1358. };
  1359. static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
  1360. .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
  1361. .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
  1362. .key_len = sizeof(struct ip_tunnel_key),
  1363. .automatic_shrinking = true
  1364. };
  1365. /* convert counter width in bits to a mask */
  1366. #define mask(width) ((u64)~0 >> (64 - (width)))
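/* Illustrative values: mask(36) = ~0ULL >> 28 = 0x0000000fffffffff and
 * mask(28) = 0x000000000fffffff, matching the byte and packet counter
 * masks set up in bnxt_init_tc() below.
 */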
  1367. int bnxt_init_tc(struct bnxt *bp)
  1368. {
  1369. struct bnxt_tc_info *tc_info;
  1370. int rc;
  1371. if (bp->hwrm_spec_code < 0x10803) {
  1372. netdev_warn(bp->dev,
  1373. "Firmware does not support TC flower offload.\n");
  1374. return -ENOTSUPP;
  1375. }
  1376. tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
  1377. if (!tc_info)
  1378. return -ENOMEM;
  1379. mutex_init(&tc_info->lock);
  1380. /* Counter widths are programmed by FW */
  1381. tc_info->bytes_mask = mask(36);
  1382. tc_info->packets_mask = mask(28);
  1383. tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
  1384. rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
  1385. if (rc)
  1386. goto free_tc_info;
  1387. tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
  1388. rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
  1389. if (rc)
  1390. goto destroy_flow_table;
  1391. tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
  1392. rc = rhashtable_init(&tc_info->decap_l2_table,
  1393. &tc_info->decap_l2_ht_params);
  1394. if (rc)
  1395. goto destroy_l2_table;
  1396. tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
  1397. rc = rhashtable_init(&tc_info->decap_table,
  1398. &tc_info->decap_ht_params);
  1399. if (rc)
  1400. goto destroy_decap_l2_table;
  1401. tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
  1402. rc = rhashtable_init(&tc_info->encap_table,
  1403. &tc_info->encap_ht_params);
  1404. if (rc)
  1405. goto destroy_decap_table;
  1406. tc_info->enabled = true;
  1407. bp->dev->hw_features |= NETIF_F_HW_TC;
  1408. bp->dev->features |= NETIF_F_HW_TC;
  1409. bp->tc_info = tc_info;
  1410. return 0;
  1411. destroy_decap_table:
  1412. rhashtable_destroy(&tc_info->decap_table);
  1413. destroy_decap_l2_table:
  1414. rhashtable_destroy(&tc_info->decap_l2_table);
  1415. destroy_l2_table:
  1416. rhashtable_destroy(&tc_info->l2_table);
  1417. destroy_flow_table:
  1418. rhashtable_destroy(&tc_info->flow_table);
  1419. free_tc_info:
  1420. kfree(tc_info);
  1421. return rc;
  1422. }
  1423. void bnxt_shutdown_tc(struct bnxt *bp)
  1424. {
  1425. struct bnxt_tc_info *tc_info = bp->tc_info;
  1426. if (!bnxt_tc_flower_enabled(bp))
  1427. return;
  1428. rhashtable_destroy(&tc_info->flow_table);
  1429. rhashtable_destroy(&tc_info->l2_table);
  1430. rhashtable_destroy(&tc_info->decap_l2_table);
  1431. rhashtable_destroy(&tc_info->decap_table);
  1432. rhashtable_destroy(&tc_info->encap_table);
  1433. kfree(tc_info);
  1434. bp->tc_info = NULL;
  1435. }