bcm_sf2_cfp.c

/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_udf_layout {
        u8 slices[UDF_NUM_SLICES];
        u32 mask_value;
};

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
        .slices = {
                /* End of L2, byte offset 12, src IP[0:15] */
                CFG_UDF_EOL2 | 6,
                /* End of L2, byte offset 14, src IP[16:31] */
                CFG_UDF_EOL2 | 7,
                /* End of L2, byte offset 16, dst IP[0:15] */
                CFG_UDF_EOL2 | 8,
                /* End of L2, byte offset 18, dst IP[16:31] */
                CFG_UDF_EOL2 | 9,
                /* End of L3, byte offset 0, src port */
                CFG_UDF_EOL3 | 0,
                /* End of L3, byte offset 2, dst port */
                CFG_UDF_EOL3 | 1,
                0, 0, 0
        },
        .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
};
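
/* Illustrative only, not part of the original driver: a rule exercising this
 * layout is typically installed from user space with something like
 *
 *   ethtool -N <iface> flow-type tcp4 src-ip 10.0.0.1 dst-ip 10.0.0.2 \
 *           src-port 1000 dst-port 2000 action 16
 *
 * where "action 16" is a hypothetical example encoding port 2, queue 0,
 * matching the ring_cookie decoding (port * 8 + queue) done in
 * bcm_sf2_cfp_rule_set() below.
 */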
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
        unsigned int i, count = 0;

        for (i = 0; i < UDF_NUM_SLICES; i++) {
                if (layout[i] != 0)
                        count++;
        }

        return count;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
                                unsigned int slice_num,
                                const u8 *layout)
{
        u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
        unsigned int i;

        for (i = 0; i < UDF_NUM_SLICES; i++)
                core_writel(priv, layout[i], offset + i * 4);
}
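
/* bcm_sf2_cfp_op() below drives a single CFP RAM access: it selects the RAM
 * and opcode, sets the self-clearing OP_STR_DONE bit and polls until the
 * hardware clears it. TCAM search operations are the exception and are
 * sequenced by hand in bcm_sf2_cfp_rule_get_all().
 */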
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
        reg |= OP_STR_DONE | op;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                reg = core_readl(priv, CORE_CFP_ACC);
                if (!(reg & OP_STR_DONE))
                        break;

                cpu_relax();
        } while (timeout--);

        if (!timeout)
                return -ETIMEDOUT;

        return 0;
}
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
                                             unsigned int addr)
{
        u32 reg;

        WARN_ON(addr >= CFP_NUM_RULES);

        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
        reg |= addr << XCESS_ADDR_SHIFT;
        core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
        /* Entry #0 is reserved */
        return CFP_NUM_RULES - 1;
}
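
/* bcm_sf2_cfp_rule_set() below programs one classification rule: it applies
 * the UDF layout, fills the TCAM data and mask words, writes the action,
 * policer and rate-meter RAM entries for the chosen location, and finally
 * enables CFP on the ingress port. The ethtool ring_cookie is decoded as
 * destination port * 8 + queue (e.g. a cookie of 18 selects port 2, queue 2).
 */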
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
                                struct ethtool_rx_flow_spec *fs)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        struct ethtool_tcpip4_spec *v4_spec;
        const struct cfp_udf_layout *layout;
        unsigned int slice_num, rule_index;
        unsigned int queue_num, port_num;
        u8 ip_proto, ip_frag;
        u8 num_udf;
        u32 reg;
        int ret;

        /* Check for unsupported extensions */
        if ((fs->flow_type & FLOW_EXT) &&
            (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
                return -EINVAL;

        if (fs->location != RX_CLS_LOC_ANY &&
            test_bit(fs->location, priv->cfp.used))
                return -EBUSY;

        if (fs->location != RX_CLS_LOC_ANY &&
            fs->location > bcm_sf2_cfp_rule_size(priv))
                return -EINVAL;

        ip_frag = be32_to_cpu(fs->m_ext.data[0]);

        /* We do not support discarding packets, check that the
         * destination port is enabled and that we are within the
         * number of ports supported by the switch
         */
        port_num = fs->ring_cookie / 8;

        if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
            !(BIT(port_num) & ds->enabled_port_mask) ||
            port_num >= priv->hw_params.num_ports)
                return -EINVAL;

        switch (fs->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                ip_proto = IPPROTO_TCP;
                v4_spec = &fs->h_u.tcp_ip4_spec;
                break;
        case UDP_V4_FLOW:
                ip_proto = IPPROTO_UDP;
                v4_spec = &fs->h_u.udp_ip4_spec;
                break;
        default:
                return -EINVAL;
        }

        /* We only use one UDF slice for now */
        slice_num = 1;
        layout = &udf_tcpip4_layout;
        num_udf = bcm_sf2_get_num_udf_slices(layout->slices);

        /* Apply the UDF layout for this filter */
        bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);

        /* Apply to all packets received through this port */
        core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

        /* S-Tag status [31:30]
         * C-Tag status [29:28]
         * L2 framing [27:26]
         * L3 framing [25:24]
         * IP ToS [23:16]
         * IP proto [15:08]
         * IP Fragm [7]
         * Non 1st frag [6]
         * IP Authen [5]
         * TTL range [4:3]
         * PPPoE session [2]
         * Reserved [1]
         * UDF_Valid[8] [0]
         */
        core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
                    CORE_CFP_DATA_PORT(6));

        /* UDF_Valid[7:0] [31:24]
         * S-Tag [23:8]
         * C-Tag [7:0]
         */
        core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));

        /* C-Tag [31:24]
         * UDF_n_A8 [23:8]
         * UDF_n_A7 [7:0]
         */
        core_writel(priv, 0, CORE_CFP_DATA_PORT(4));

        /* UDF_n_A7 [31:24]
         * UDF_n_A6 [23:8]
         * UDF_n_A5 [7:0]
         */
        core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
                    CORE_CFP_DATA_PORT(3));

        /* UDF_n_A5 [31:24]
         * UDF_n_A4 [23:8]
         * UDF_n_A3 [7:0]
         */
        reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
              (u32)be16_to_cpu(v4_spec->psrc) << 8 |
              (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(2));

        /* UDF_n_A3 [31:24]
         * UDF_n_A2 [23:8]
         * UDF_n_A1 [7:0]
         */
        reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
              (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
              (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(1));

        /* UDF_n_A1 [31:24]
         * UDF_n_A0 [23:8]
         * Reserved [7:4]
         * Slice ID [3:2]
         * Slice valid [1:0]
         */
        reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
              (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
              SLICE_NUM(slice_num) | SLICE_VALID;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
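
        /* Worked example (hypothetical values, for illustration only): with
         * src-ip 10.0.0.1, dst-ip 10.0.0.2, src-port 1000 (0x03e8) and
         * dst-port 2000 (0x07d0), the writes above program:
         *
         *   DATA_PORT(3) = 0x00000007  (dst port [15:8])
         *   DATA_PORT(2) = 0xd003e800  (dst port [7:0], src port, dst IP [15:8])
         *   DATA_PORT(1) = 0x020a0000  (dst IP [7:0], dst IP [31:16], src IP [15:8])
         *   DATA_PORT(0) = 0x010a0000 | SLICE_NUM(1) | SLICE_VALID
         *                              (src IP [7:0], src IP [31:16])
         */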

        /* Source port map match */
        core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

        /* Mask with the specific layout for IPv4 packets */
        core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));

        /* Mask all but valid UDFs */
        core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));

        /* Mask all */
        core_writel(priv, 0, CORE_CFP_MASK_PORT(4));

        /* All other UDFs should be matched with the filter */
        core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
        core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
        core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
        core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));

        /* Locate the first rule available */
        if (fs->location == RX_CLS_LOC_ANY)
                rule_index = find_first_zero_bit(priv->cfp.used,
                                                 bcm_sf2_cfp_rule_size(priv));
        else
                rule_index = fs->location;

        /* Insert into TCAM now */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index);

        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
        if (ret) {
                pr_err("TCAM entry at addr %d failed\n", rule_index);
                return ret;
        }

        /* Replace ARL derived destination with DST_MAP derived, define
         * which port and queue this should be forwarded to.
         *
         * We have a small oddity where Port 6 just does not have a
         * valid bit here (so we subtract by one).
         */
        queue_num = fs->ring_cookie % 8;
        if (port_num >= 7)
                port_num -= 1;

        reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
              CHANGE_TC | queue_num << NEW_TC_SHIFT;

        core_writel(priv, reg, CORE_ACT_POL_DATA0);

        /* Set classification ID that needs to be put in Broadcom tag */
        core_writel(priv, rule_index << CHAIN_ID_SHIFT,
                    CORE_ACT_POL_DATA1);

        core_writel(priv, 0, CORE_ACT_POL_DATA2);

        /* Configure policer RAM now */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
        if (ret) {
                pr_err("Policer entry at %d failed\n", rule_index);
                return ret;
        }

        /* Disable the policer */
        core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

        /* Now the rate meter */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
        if (ret) {
                pr_err("Meter entry at %d failed\n", rule_index);
                return ret;
        }

        /* Turn on CFP for this rule now */
        reg = core_readl(priv, CORE_CFP_CTL_REG);
        reg |= BIT(port);
        core_writel(priv, reg, CORE_CFP_CTL_REG);

        /* Flag the rule as being used and return it */
        set_bit(rule_index, priv->cfp.used);
        fs->location = rule_index;

        return 0;
}
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
                                u32 loc)
{
        int ret;
        u32 reg;

        /* Refuse deletion of unused rules, and the default reserved rule */
        if (!test_bit(loc, priv->cfp.used) || loc == 0)
                return -EINVAL;

        /* Indicate which rule we want to read */
        bcm_sf2_cfp_rule_addr_set(priv, loc);

        ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
        if (ret)
                return ret;

        /* Clear its valid bits */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
        reg &= ~SLICE_VALID;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

        /* Write back this entry into the TCAM now */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
        if (ret)
                return ret;

        clear_bit(loc, priv->cfp.used);

        return 0;
}
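
/* bcm_sf2_invert_masks() flips every mask bit (the bytes of m_u plus the
 * extension fields) before a rule read back from hardware is reported to
 * user space; bcm_sf2_cfp_rule_get() builds the masks with the opposite
 * polarity from the convention the ethtool flow-spec interface expects.
 */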
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
        unsigned int i;

        for (i = 0; i < sizeof(flow->m_u); i++)
                flow->m_u.hdata[i] ^= 0xff;

        flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
        flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
        flow->m_ext.data[0] ^= cpu_to_be32(~0);
        flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
                                struct ethtool_rxnfc *nfc, bool search)
{
        struct ethtool_tcpip4_spec *v4_spec;
        unsigned int queue_num;
        u16 src_dst_port;
        u32 reg, ipv4;
        int ret;

        if (!search) {
                bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

                ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
                if (ret)
                        return ret;

                reg = core_readl(priv, CORE_ACT_POL_DATA0);

                ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
                if (ret)
                        return ret;
        } else {
                reg = core_readl(priv, CORE_ACT_POL_DATA0);
        }

        /* Extract the destination port */
        nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
                                  DST_MAP_IB_MASK) - 1;

        /* There is no Port 6, so we compensate for that here */
        if (nfc->fs.ring_cookie >= 6)
                nfc->fs.ring_cookie++;
        nfc->fs.ring_cookie *= 8;

        /* Extract the destination queue */
        queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
        nfc->fs.ring_cookie += queue_num;

        /* Extract the IP protocol */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
        switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
        case IPPROTO_TCP:
                nfc->fs.flow_type = TCP_V4_FLOW;
                v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
                break;
        case IPPROTO_UDP:
                nfc->fs.flow_type = UDP_V4_FLOW;
                v4_spec = &nfc->fs.h_u.udp_ip4_spec;
                break;
        default:
                /* Clear to exit the search process */
                if (search)
                        core_readl(priv, CORE_CFP_DATA_PORT(7));
                return -EINVAL;
        }

        v4_spec->tos = (reg >> 16) & IPPROTO_MASK;
        nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);

        reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
        /* src port [15:8] */
        src_dst_port = reg << 8;

        reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
        /* src port [7:0] */
        src_dst_port |= (reg >> 24);

        v4_spec->pdst = cpu_to_be16(src_dst_port);
        nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
        v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
        nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

        /* IPv4 dst [15:8] */
        ipv4 = (reg & 0xff) << 8;
        reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
        /* IPv4 dst [31:16] */
        ipv4 |= ((reg >> 8) & 0xffff) << 16;
        /* IPv4 dst [7:0] */
        ipv4 |= (reg >> 24) & 0xff;
        v4_spec->ip4dst = cpu_to_be32(ipv4);
        nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

        /* IPv4 src [15:8] */
        ipv4 = (reg & 0xff) << 8;
        reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
        if (!(reg & SLICE_VALID))
                return -EINVAL;
        /* IPv4 src [7:0] */
        ipv4 |= (reg >> 24) & 0xff;
        /* IPv4 src [31:16] */
        ipv4 |= ((reg >> 8) & 0xffff) << 16;
        v4_spec->ip4src = cpu_to_be32(ipv4);
        nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

        /* Read last to avoid next entry clobbering the results during search
         * operations
         */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
        if (!(reg & 1 << port))
                return -EINVAL;

        bcm_sf2_invert_masks(&nfc->fs);

        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);

        return 0;
}
/* We implement the search doing a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
                                    int port, struct ethtool_rxnfc *nfc,
                                    u32 *rule_locs)
{
        unsigned int index = 1, rules_cnt = 0;
        int ret;
        u32 reg;

        /* Do not poll on OP_STR_DONE to be self-clearing for search
         * operations, we cannot use bcm_sf2_cfp_op here because it completes
         * on clearing OP_STR_DONE which won't clear until the entire search
         * operation is over.
         */
        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
        reg |= index << XCESS_ADDR_SHIFT;
        reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
        reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                /* Wait for results to be ready */
                reg = core_readl(priv, CORE_CFP_ACC);

                /* Extract the address we are searching */
                index = reg >> XCESS_ADDR_SHIFT;
                index &= XCESS_ADDR_MASK;

                /* We have a valid search result, so flag it accordingly */
                if (reg & SEARCH_STS) {
                        ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
                        if (ret)
                                continue;

                        rule_locs[rules_cnt] = index;
                        rules_cnt++;
                }

                /* Search is over break out */
                if (!(reg & OP_STR_DONE))
                        break;

        } while (index < CFP_NUM_RULES);

        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);
        nfc->rule_cnt = rules_cnt;

        return 0;
}
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
                      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        int ret = 0;

        mutex_lock(&priv->cfp.lock);

        switch (nfc->cmd) {
        case ETHTOOL_GRXCLSRLCNT:
                /* Subtract the default, unusable rule */
                nfc->rule_cnt = bitmap_weight(priv->cfp.used,
                                              CFP_NUM_RULES) - 1;
                /* We support specifying rule locations */
                nfc->data |= RX_CLS_LOC_SPECIAL;
                break;
        case ETHTOOL_GRXCLSRULE:
                ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
                break;
        case ETHTOOL_GRXCLSRLALL:
                ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&priv->cfp.lock);

        return ret;
}
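
/* Typical user-space trigger (illustrative, not from this file):
 * "ethtool -n <iface>" issues ETHTOOL_GRXCLSRLCNT and ETHTOOL_GRXCLSRLALL to
 * enumerate the installed rules, then ETHTOOL_GRXCLSRULE for each reported
 * location; "ethtool -n <iface> rule <loc>" queries a single entry.
 */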
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
                      struct ethtool_rxnfc *nfc)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        int ret = 0;

        mutex_lock(&priv->cfp.lock);

        switch (nfc->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&priv->cfp.lock);

        return ret;
}
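
/* Illustrative counterpart for rule removal (assumed typical usage):
 * "ethtool -N <iface> delete <loc>" issues ETHTOOL_SRXCLSRLDEL and lands in
 * bcm_sf2_cfp_rule_del() with the rule location to clear.
 */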
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_CFP_ACC);
        reg |= TCAM_RESET;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                reg = core_readl(priv, CORE_CFP_ACC);
                if (!(reg & TCAM_RESET))
                        break;

                cpu_relax();
        } while (timeout--);

        if (!timeout)
                return -ETIMEDOUT;

        return 0;
}