/* bcm_sf2_cfp.c */
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
  20. struct cfp_udf_layout {
  21. u8 slices[UDF_NUM_SLICES];
  22. u32 mask_value;
  23. };
  24. /* UDF slices layout for a TCPv4/UDPv4 specification */
  25. static const struct cfp_udf_layout udf_tcpip4_layout = {
  26. .slices = {
  27. /* End of L2, byte offset 12, src IP[0:15] */
  28. CFG_UDF_EOL2 | 6,
  29. /* End of L2, byte offset 14, src IP[16:31] */
  30. CFG_UDF_EOL2 | 7,
  31. /* End of L2, byte offset 16, dst IP[0:15] */
  32. CFG_UDF_EOL2 | 8,
  33. /* End of L2, byte offset 18, dst IP[16:31] */
  34. CFG_UDF_EOL2 | 9,
  35. /* End of L3, byte offset 0, src port */
  36. CFG_UDF_EOL3 | 0,
  37. /* End of L3, byte offset 2, dst port */
  38. CFG_UDF_EOL3 | 1,
  39. 0, 0, 0
  40. },
  41. .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
  42. };
  43. static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
  44. {
  45. unsigned int i, count = 0;
  46. for (i = 0; i < UDF_NUM_SLICES; i++) {
  47. if (layout[i] != 0)
  48. count++;
  49. }
  50. return count;
  51. }
  52. static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
  53. unsigned int slice_num,
  54. const u8 *layout)
  55. {
  56. u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
  57. unsigned int i;
  58. for (i = 0; i < UDF_NUM_SLICES; i++)
  59. core_writel(priv, layout[i], offset + i * 4);
  60. }
  61. static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
  62. {
  63. unsigned int timeout = 1000;
  64. u32 reg;
  65. reg = core_readl(priv, CORE_CFP_ACC);
  66. reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
  67. reg |= OP_STR_DONE | op;
  68. core_writel(priv, reg, CORE_CFP_ACC);
  69. do {
  70. reg = core_readl(priv, CORE_CFP_ACC);
  71. if (!(reg & OP_STR_DONE))
  72. break;
  73. cpu_relax();
  74. } while (timeout--);
  75. if (!timeout)
  76. return -ETIMEDOUT;
  77. return 0;
  78. }
  79. static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
  80. unsigned int addr)
  81. {
  82. u32 reg;
  83. WARN_ON(addr >= priv->num_cfp_rules);
  84. reg = core_readl(priv, CORE_CFP_ACC);
  85. reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
  86. reg |= addr << XCESS_ADDR_SHIFT;
  87. core_writel(priv, reg, CORE_CFP_ACC);
  88. }
  89. static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
  90. {
  91. /* Entry #0 is reserved */
  92. return priv->num_cfp_rules - 1;
  93. }
  94. static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
  95. struct ethtool_rx_flow_spec *fs)
  96. {
  97. struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
  98. struct ethtool_tcpip4_spec *v4_spec;
  99. const struct cfp_udf_layout *layout;
  100. unsigned int slice_num, rule_index;
  101. unsigned int queue_num, port_num;
  102. u8 ip_proto, ip_frag;
  103. u8 num_udf;
  104. u32 reg;
  105. int ret;
  106. /* Check for unsupported extensions */
  107. if ((fs->flow_type & FLOW_EXT) &&
  108. (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
  109. return -EINVAL;
  110. if (fs->location != RX_CLS_LOC_ANY &&
  111. test_bit(fs->location, priv->cfp.used))
  112. return -EBUSY;
  113. if (fs->location != RX_CLS_LOC_ANY &&
  114. fs->location > bcm_sf2_cfp_rule_size(priv))
  115. return -EINVAL;
  116. ip_frag = be32_to_cpu(fs->m_ext.data[0]);
  117. /* We do not support discarding packets, check that the
  118. * destination port is enabled and that we are within the
  119. * number of ports supported by the switch
  120. */
  121. port_num = fs->ring_cookie / 8;
  122. if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
  123. !(BIT(port_num) & ds->enabled_port_mask) ||
  124. port_num >= priv->hw_params.num_ports)
  125. return -EINVAL;
  126. switch (fs->flow_type & ~FLOW_EXT) {
  127. case TCP_V4_FLOW:
  128. ip_proto = IPPROTO_TCP;
  129. v4_spec = &fs->h_u.tcp_ip4_spec;
  130. break;
  131. case UDP_V4_FLOW:
  132. ip_proto = IPPROTO_UDP;
  133. v4_spec = &fs->h_u.udp_ip4_spec;
  134. break;
  135. default:
  136. return -EINVAL;
  137. }
  138. /* We only use one UDF slice for now */
  139. slice_num = 1;
  140. layout = &udf_tcpip4_layout;
  141. num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
  142. /* Apply the UDF layout for this filter */
  143. bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
  144. /* Apply to all packets received through this port */
  145. core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
  146. /* S-Tag status [31:30]
  147. * C-Tag status [29:28]
  148. * L2 framing [27:26]
  149. * L3 framing [25:24]
  150. * IP ToS [23:16]
  151. * IP proto [15:08]
  152. * IP Fragm [7]
  153. * Non 1st frag [6]
  154. * IP Authen [5]
  155. * TTL range [4:3]
  156. * PPPoE session [2]
  157. * Reserved [1]
  158. * UDF_Valid[8] [0]
  159. */
  160. core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
  161. CORE_CFP_DATA_PORT(6));
  162. /* UDF_Valid[7:0] [31:24]
  163. * S-Tag [23:8]
  164. * C-Tag [7:0]
  165. */
  166. core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
  167. /* C-Tag [31:24]
  168. * UDF_n_A8 [23:8]
  169. * UDF_n_A7 [7:0]
  170. */
  171. core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
  172. /* UDF_n_A7 [31:24]
  173. * UDF_n_A6 [23:8]
  174. * UDF_n_A5 [7:0]
  175. */
  176. core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
  177. CORE_CFP_DATA_PORT(3));
  178. /* UDF_n_A5 [31:24]
  179. * UDF_n_A4 [23:8]
  180. * UDF_n_A3 [7:0]
  181. */
  182. reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
  183. (u32)be16_to_cpu(v4_spec->psrc) << 8 |
  184. (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
  185. core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
  186. /* UDF_n_A3 [31:24]
  187. * UDF_n_A2 [23:8]
  188. * UDF_n_A1 [7:0]
  189. */
  190. reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
  191. (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
  192. (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
  193. core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
  194. /* UDF_n_A1 [31:24]
  195. * UDF_n_A0 [23:8]
  196. * Reserved [7:4]
  197. * Slice ID [3:2]
  198. * Slice valid [1:0]
  199. */
  200. reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
  201. (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
  202. SLICE_NUM(slice_num) | SLICE_VALID;
  203. core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
  204. /* Source port map match */
  205. core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
  206. /* Mask with the specific layout for IPv4 packets */
  207. core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
  208. /* Mask all but valid UDFs */
  209. core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
  210. /* Mask all */
  211. core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
  212. /* All other UDFs should be matched with the filter */
  213. core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
  214. core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
  215. core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
  216. core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
  217. /* Locate the first rule available */
  218. if (fs->location == RX_CLS_LOC_ANY)
  219. rule_index = find_first_zero_bit(priv->cfp.used,
  220. bcm_sf2_cfp_rule_size(priv));
  221. else
  222. rule_index = fs->location;
  223. /* Insert into TCAM now */
  224. bcm_sf2_cfp_rule_addr_set(priv, rule_index);
  225. ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
  226. if (ret) {
  227. pr_err("TCAM entry at addr %d failed\n", rule_index);
  228. return ret;
  229. }
  230. /* Replace ARL derived destination with DST_MAP derived, define
  231. * which port and queue this should be forwarded to.
  232. *
  233. * We have a small oddity where Port 6 just does not have a
  234. * valid bit here (so we subtract by one).
  235. */
  236. queue_num = fs->ring_cookie % 8;
  237. if (port_num >= 7)
  238. port_num -= 1;
  239. reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
  240. CHANGE_TC | queue_num << NEW_TC_SHIFT;
  241. core_writel(priv, reg, CORE_ACT_POL_DATA0);
  242. /* Set classification ID that needs to be put in Broadcom tag */
  243. core_writel(priv, rule_index << CHAIN_ID_SHIFT,
  244. CORE_ACT_POL_DATA1);
  245. core_writel(priv, 0, CORE_ACT_POL_DATA2);
  246. /* Configure policer RAM now */
  247. ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
  248. if (ret) {
  249. pr_err("Policer entry at %d failed\n", rule_index);
  250. return ret;
  251. }
  252. /* Disable the policer */
  253. core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
  254. /* Now the rate meter */
  255. ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
  256. if (ret) {
  257. pr_err("Meter entry at %d failed\n", rule_index);
  258. return ret;
  259. }
  260. /* Turn on CFP for this rule now */
  261. reg = core_readl(priv, CORE_CFP_CTL_REG);
  262. reg |= BIT(port);
  263. core_writel(priv, reg, CORE_CFP_CTL_REG);
  264. /* Flag the rule as being used and return it */
  265. set_bit(rule_index, priv->cfp.used);
  266. fs->location = rule_index;
  267. return 0;
  268. }
  269. static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
  270. u32 loc)
  271. {
  272. int ret;
  273. u32 reg;
  274. /* Refuse deletion of unused rules, and the default reserved rule */
  275. if (!test_bit(loc, priv->cfp.used) || loc == 0)
  276. return -EINVAL;
  277. /* Indicate which rule we want to read */
  278. bcm_sf2_cfp_rule_addr_set(priv, loc);
  279. ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
  280. if (ret)
  281. return ret;
  282. /* Clear its valid bits */
  283. reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
  284. reg &= ~SLICE_VALID;
  285. core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
  286. /* Write back this entry into the TCAM now */
  287. ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
  288. if (ret)
  289. return ret;
  290. clear_bit(loc, priv->cfp.used);
  291. return 0;
  292. }
  293. static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
  294. {
  295. unsigned int i;
  296. for (i = 0; i < sizeof(flow->m_u); i++)
  297. flow->m_u.hdata[i] ^= 0xff;
  298. flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
  299. flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
  300. flow->m_ext.data[0] ^= cpu_to_be32(~0);
  301. flow->m_ext.data[1] ^= cpu_to_be32(~0);
  302. }
  303. static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
  304. struct ethtool_rxnfc *nfc, bool search)
  305. {
  306. struct ethtool_tcpip4_spec *v4_spec;
  307. unsigned int queue_num;
  308. u16 src_dst_port;
  309. u32 reg, ipv4;
  310. int ret;
  311. if (!search) {
  312. bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
  313. ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
  314. if (ret)
  315. return ret;
  316. reg = core_readl(priv, CORE_ACT_POL_DATA0);
  317. ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
  318. if (ret)
  319. return ret;
  320. } else {
  321. reg = core_readl(priv, CORE_ACT_POL_DATA0);
  322. }
  323. /* Extract the destination port */
  324. nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
  325. DST_MAP_IB_MASK) - 1;
  326. /* There is no Port 6, so we compensate for that here */
  327. if (nfc->fs.ring_cookie >= 6)
  328. nfc->fs.ring_cookie++;
  329. nfc->fs.ring_cookie *= 8;
  330. /* Extract the destination queue */
  331. queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
  332. nfc->fs.ring_cookie += queue_num;
  333. /* Extract the IP protocol */
  334. reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
  335. switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
  336. case IPPROTO_TCP:
  337. nfc->fs.flow_type = TCP_V4_FLOW;
  338. v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
  339. break;
  340. case IPPROTO_UDP:
  341. nfc->fs.flow_type = UDP_V4_FLOW;
  342. v4_spec = &nfc->fs.h_u.udp_ip4_spec;
  343. break;
  344. default:
  345. /* Clear to exit the search process */
  346. if (search)
  347. core_readl(priv, CORE_CFP_DATA_PORT(7));
  348. return -EINVAL;
  349. }
  350. v4_spec->tos = (reg >> 16) & IPPROTO_MASK;
  351. nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);
  352. reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
  353. /* src port [15:8] */
  354. src_dst_port = reg << 8;
  355. reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
  356. /* src port [7:0] */
  357. src_dst_port |= (reg >> 24);
  358. v4_spec->pdst = cpu_to_be16(src_dst_port);
  359. nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
  360. v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
  361. nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
  362. /* IPv4 dst [15:8] */
  363. ipv4 = (reg & 0xff) << 8;
  364. reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
  365. /* IPv4 dst [31:16] */
  366. ipv4 |= ((reg >> 8) & 0xffff) << 16;
  367. /* IPv4 dst [7:0] */
  368. ipv4 |= (reg >> 24) & 0xff;
  369. v4_spec->ip4dst = cpu_to_be32(ipv4);
  370. nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
  371. /* IPv4 src [15:8] */
  372. ipv4 = (reg & 0xff) << 8;
  373. reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
  374. if (!(reg & SLICE_VALID))
  375. return -EINVAL;
  376. /* IPv4 src [7:0] */
  377. ipv4 |= (reg >> 24) & 0xff;
  378. /* IPv4 src [31:16] */
  379. ipv4 |= ((reg >> 8) & 0xffff) << 16;
  380. v4_spec->ip4src = cpu_to_be32(ipv4);
  381. nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
  382. /* Read last to avoid next entry clobbering the results during search
  383. * operations
  384. */
  385. reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
  386. if (!(reg & 1 << port))
  387. return -EINVAL;
  388. bcm_sf2_invert_masks(&nfc->fs);
  389. /* Put the TCAM size here */
  390. nfc->data = bcm_sf2_cfp_rule_size(priv);
  391. return 0;
  392. }
  393. /* We implement the search doing a TCAM search operation */
  394. static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
  395. int port, struct ethtool_rxnfc *nfc,
  396. u32 *rule_locs)
  397. {
  398. unsigned int index = 1, rules_cnt = 0;
  399. int ret;
  400. u32 reg;
  401. /* Do not poll on OP_STR_DONE to be self-clearing for search
  402. * operations, we cannot use bcm_sf2_cfp_op here because it completes
  403. * on clearing OP_STR_DONE which won't clear until the entire search
  404. * operation is over.
  405. */
  406. reg = core_readl(priv, CORE_CFP_ACC);
  407. reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
  408. reg |= index << XCESS_ADDR_SHIFT;
  409. reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
  410. reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
  411. core_writel(priv, reg, CORE_CFP_ACC);
  412. do {
  413. /* Wait for results to be ready */
  414. reg = core_readl(priv, CORE_CFP_ACC);
  415. /* Extract the address we are searching */
  416. index = reg >> XCESS_ADDR_SHIFT;
  417. index &= XCESS_ADDR_MASK;
  418. /* We have a valid search result, so flag it accordingly */
  419. if (reg & SEARCH_STS) {
  420. ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
  421. if (ret)
  422. continue;
  423. rule_locs[rules_cnt] = index;
  424. rules_cnt++;
  425. }
  426. /* Search is over break out */
  427. if (!(reg & OP_STR_DONE))
  428. break;
  429. } while (index < priv->num_cfp_rules);
  430. /* Put the TCAM size here */
  431. nfc->data = bcm_sf2_cfp_rule_size(priv);
  432. nfc->rule_cnt = rules_cnt;
  433. return 0;
  434. }
  435. int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
  436. struct ethtool_rxnfc *nfc, u32 *rule_locs)
  437. {
  438. struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
  439. int ret = 0;
  440. mutex_lock(&priv->cfp.lock);
  441. switch (nfc->cmd) {
  442. case ETHTOOL_GRXCLSRLCNT:
  443. /* Subtract the default, unusable rule */
  444. nfc->rule_cnt = bitmap_weight(priv->cfp.used,
  445. priv->num_cfp_rules) - 1;
  446. /* We support specifying rule locations */
  447. nfc->data |= RX_CLS_LOC_SPECIAL;
  448. break;
  449. case ETHTOOL_GRXCLSRULE:
  450. ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
  451. break;
  452. case ETHTOOL_GRXCLSRLALL:
  453. ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
  454. break;
  455. default:
  456. ret = -EOPNOTSUPP;
  457. break;
  458. }
  459. mutex_unlock(&priv->cfp.lock);
  460. return ret;
  461. }
  462. int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
  463. struct ethtool_rxnfc *nfc)
  464. {
  465. struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
  466. int ret = 0;
  467. mutex_lock(&priv->cfp.lock);
  468. switch (nfc->cmd) {
  469. case ETHTOOL_SRXCLSRLINS:
  470. ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
  471. break;
  472. case ETHTOOL_SRXCLSRLDEL:
  473. ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
  474. break;
  475. default:
  476. ret = -EOPNOTSUPP;
  477. break;
  478. }
  479. mutex_unlock(&priv->cfp.lock);
  480. return ret;
  481. }
  482. int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
  483. {
  484. unsigned int timeout = 1000;
  485. u32 reg;
  486. reg = core_readl(priv, CORE_CFP_ACC);
  487. reg |= TCAM_RESET;
  488. core_writel(priv, reg, CORE_CFP_ACC);
  489. do {
  490. reg = core_readl(priv, CORE_CFP_ACC);
  491. if (!(reg & TCAM_RESET))
  492. break;
  493. cpu_relax();
  494. } while (timeout--);
  495. if (!timeout)
  496. return -ETIMEDOUT;
  497. return 0;
  498. }