bcm_sf2_cfp.c

/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
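
/* The CFP (Compact Field Processor) matches ingress packets against TCAM
 * entries built out of UDFs (User Defined Fields): 16-bit words extracted
 * at a configurable byte offset from an anchor such as the end of the L2
 * or L3 header. A slice groups UDFS_PER_SLICE such extractors; the layout
 * structures below describe which UDFs a given filter type programs.
 */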
struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}
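
/* The per-slice UDF_Valid bits are split across two TCAM data words: bit 8
 * sits in bit 0 of CORE_CFP_DATA_PORT(6) while bits 7:0 occupy bits 31:24
 * of CORE_CFP_DATA_PORT(5). The two helpers below derive each half from
 * the number of UDFs in use.
 */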
static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}
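
/* Start a CFP RAM operation (TCAM, action/policer or rate meter RAM
 * read/write) and poll for completion: OP_STR_DONE is self-clearing and
 * goes low once the operation has finished.
 */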
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
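
/* Program the action RAM (DST_MAP-derived destination port and queue
 * override) and the rate meter RAM (the policer itself is left disabled)
 * for a given rule index.
 */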
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
	      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT,
		    CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	const struct cfp_udf_layout *layout;
	struct ethtool_tcpip4_spec *v4_spec;
	unsigned int slice_num, rule_index;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 bcm_sf2_cfp_rule_size(priv));
	else
		rule_index = fs->location;

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status [31:30]
	 * C-Tag status [29:28]
	 * L2 framing [27:26]
	 * L3 framing [25:24]
	 * IP ToS [23:16]
	 * IP proto [15:08]
	 * IP Fragm [7]
	 * Non 1st frag [6]
	 * IP Authen [5]
	 * TTL range [4:3]
	 * PPPoE session [2]
	 * Reserved [1]
	 * UDF_Valid[8] [0]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* UDF_Valid[7:0] [31:24]
	 * S-Tag [23:8]
	 * C-Tag [7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* C-Tag [31:24]
	 * UDF_n_A8 [23:8]
	 * UDF_n_A7 [7:0]
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(4));

	/* UDF_n_A7 [31:24]
	 * UDF_n_A6 [23:8]
	 * UDF_n_A5 [7:0]
	 */
	core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
		    CORE_CFP_DATA_PORT(3));

	/* UDF_n_A5 [31:24]
	 * UDF_n_A4 [23:8]
	 * UDF_n_A3 [7:0]
	 */
	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(2));

	/* UDF_n_A3 [31:24]
	 * UDF_n_A2 [23:8]
	 * UDF_n_A1 [7:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(1));

	/* UDF_n_A1 [31:24]
	 * UDF_n_A0 [23:8]
	 * Reserved [7:4]
	 * Slice ID [3:2]
	 * Slice valid [1:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(4));

	/* All other UDFs should be matched with the filter */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
	core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
	core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
	core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		return ret;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num, queue_num);
	if (ret)
		return ret;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	fs->location = rule_index;

	return 0;
}
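
/* Entry point for ETHTOOL_SRXCLSRLINS. As a (hypothetical) example, a
 * classification rule inserted through ethtool's ntuple interface:
 *
 *   ethtool -N <iface> flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *           action 10 loc 1
 *
 * reaches this function with ring_cookie == 10, which encodes both the
 * destination port (10 / SF2_NUM_EGRESS_QUEUES) and the egress queue
 * (10 % SF2_NUM_EGRESS_QUEUES).
 */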
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int queue_num, port_num;
	int ret;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	    fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
	    !(BIT(port_num) & ds->enabled_port_mask) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/*
	 * We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num, queue_num, fs);
	if (ret)
		return ret;

	return 0;
}
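
/* Deleting a rule does not erase the TCAM entry: the entry is read back,
 * its SLICE_VALID bits are cleared and it is written back, which is
 * enough for the hardware to stop matching on it.
 */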
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
				u32 loc)
{
	int ret;
	u32 reg;

	/* Refuse deletion of unused rules, and the default reserved rule */
	if (!test_bit(loc, priv->cfp.used) || loc == 0)
		return -EINVAL;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);

	return 0;
}
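
/* Masks are built internally with set bits meaning "compare this bit",
 * whereas the rxnfc interface reports them with the opposite polarity,
 * so every mask byte gets XORed with all-ones before being returned.
 */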
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_tcpip4_spec *v4_spec,
				     struct ethtool_tcpip4_spec *v4_m_spec)
{
	u16 src_dst_port;
	u32 reg, ipv4;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
	/* dst port [15:8] */
	src_dst_port = reg << 8;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
	/* dst port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_m_spec->pdst = cpu_to_be16(~0);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
	v4_m_spec->psrc = cpu_to_be16(~0);

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;
	reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);
	v4_m_spec->ip4dst = cpu_to_be32(~0);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));

	if (!(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);
	v4_m_spec->ip4src = cpu_to_be32(~0);

	return 0;
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec;
	unsigned int queue_num;
	u32 reg;
	int ret;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the IP protocol */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		nfc->fs.flow_type = TCP_V4_FLOW;
		v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
		v4_m_spec = &nfc->fs.m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		nfc->fs.flow_type = UDP_V4_FLOW;
		v4_spec = &nfc->fs.h_u.udp_ip4_spec;
		v4_m_spec = &nfc->fs.m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	if (v4_spec) {
		v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, v4_spec, v4_m_spec);
	}
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))
		return -EINVAL;

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}
/* We track used rules in software, so enumerating them is a simple walk
 * of the used-rules bitmap rather than a hardware TCAM search operation.
 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	/* Entry #0 is reserved, start the walk at index 1 */
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.used, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}
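
/* ethtool ->get_rxnfc() and ->set_rxnfc() entry points. Both serialize
 * against each other through the cfp.lock mutex.
 */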
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.used,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	return ret;
}
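
/* Reset the CFP TCAM: set the self-clearing TCAM_RESET bit and poll until
 * the hardware clears it, mirroring the completion handshake used in
 * bcm_sf2_cfp_op().
 */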
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}