xgene_enet_cle.c

/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"

/* interfaces to convert structures to HW recognized bit formats */
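
/* Pack one sideband entry (IP fragment flag, protocol, IP version and
 * header length) into the low 16 bits of *reg; callers combine two
 * entries per 32-bit word.
 */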
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg = SET_VAL(SB_IPFRAG, frag) |
	       SET_VAL(SB_IPPROT, type) |
	       SET_VAL(SB_IPVER, ver) |
	       SET_VAL(SB_HDRLEN, len);
}
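
/* Encode one RSS indirection-table entry. ENET1 uses the
 * IDT_FPSEL1/IDT_NFPSEL1 field layout; later ENET generations use
 * IDT_FPSEL/IDT_NFPSEL.
 */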
static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
				u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	if (pdata->enet_id == XGENE_ENET1) {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL1, fpsel) |
			   SET_VAL(IDT_NFPSEL1, nfpsel);
	} else {
		*idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
			   SET_VAL(IDT_FPSEL, fpsel) |
			   SET_VAL(IDT_NFPSEL, nfpsel);
	}
}
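
/* Serialize a classifier result (database) pointer into the six-word
 * DB_RAM format. The destination queue id straddles two words: the
 * low CLE_DSTQIDL_LEN bits land in buf[4], the remainder in buf[5]
 * next to the classifier priority.
 */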
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}
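
/* Serialize a key node. Keys are packed two per 32-bit word:
 * even-indexed keys fill the lower halfword, odd-indexed keys are
 * shifted into the upper halfword of the same word.
 */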
static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}
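
/* Serialize an extension/decision node. Branches that jump forward to
 * an absolute offset still inside packet RAM get biased by 'jb', the
 * per-port jump bytes (presumably covering the bytes the hardware
 * prepends to the frame).
 */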
static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	const struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}
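
/* Poll INDCMD_STATUS for completion of an indirect command: up to ten
 * 1-2 ms sleeps, i.e. roughly 20 ms worst case before giving up with
 * -EBUSY.
 */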
static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}
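
/* Indirect write of nregs words into classifier DRAM: set the
 * indirect address, stage the data in DATA_RAM0..n, issue the command
 * and poll for completion. Regions below PTREE_RAM exist per parser
 * port, so the write repeats for each active parser.
 */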
static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}
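
/* Point each parser at the tree's start node and at the first packet
 * byte to inspect, which makes the patricia tree take effect.
 */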
static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;

	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}
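
/* Write every result pointer into DB_RAM, six words apiece. */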
static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}
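
/* Default classification tree: dispatch on the EtherType (IPv4 or
 * not), then on the IPv4 protocol field (TCP/UDP/other), then walk an
 * RSS node that feeds the source/destination addresses and the L4
 * ports into the hash before terminating at LAST_NODE. Branch offsets
 * are absolute packet byte positions, biased by the per-port jump
 * bytes when written to hardware.
 */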
static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
	{
		/* PKT_TYPE_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 2,
		.branch = {
			{
				/* IPV4 */
				.valid = 1,
				.next_packet_pointer = 22,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = PKT_PROT_NODE,
				.next_branch = 0,
				.data = 0x8,
				.mask = 0x0
			},
			{
				.valid = 0,
				.next_packet_pointer = 262,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* PKT_PROT_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 3,
		.branch = {
			{
				/* TCP */
				.valid = 1,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 0,
				.data = 0x0600,
				.mask = 0x00ff
			},
			{
				/* UDP */
				.valid = 1,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 0,
				.data = 0x1100,
				.mask = 0x00ff
			},
			{
				.valid = 0,
				.next_packet_pointer = 26,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_TCP_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* TCP SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_TCP_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* TCP DST Port */
				.valid = 0,
				.next_packet_pointer = 256,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_UDP_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* UDP SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_UDP_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* UDP DST Port */
				.valid = 0,
				.next_packet_pointer = 258,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* RSS_IPV4_OTHERS_NODE */
		.node_type = EWDN,
		.last_node = 0,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = BOTH_BYTES,
		.result_pointer = DB_RES_DROP,
		.num_branches = 6,
		.branch = {
			{
				/* SRC IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 28,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 1,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* SRC IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 30,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 2,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B01 */
				.valid = 0,
				.next_packet_pointer = 32,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 3,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* DST IPV4 B23 */
				.valid = 0,
				.next_packet_pointer = 34,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 4,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* L4 SRC Port */
				.valid = 0,
				.next_packet_pointer = 36,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = RSS_IPV4_OTHERS_NODE,
				.next_branch = 5,
				.data = 0x0,
				.mask = 0xffff
			},
			{
				/* L4 DST Port */
				.valid = 0,
				.next_packet_pointer = 260,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = LAST_NODE,
				.next_branch = 0,
				.data = 0x0,
				.mask = 0xffff
			}
		}
	},
	{
		/* LAST NODE */
		.node_type = EWDN,
		.last_node = 1,
		.hdr_len_store = 1,
		.hdr_extn = NO_BYTE,
		.byte_store = NO_BYTE,
		.search_byte_store = NO_BYTE,
		.result_pointer = DB_RES_DROP,
		.num_branches = 1,
		.branch = {
			{
				.valid = 0,
				.next_packet_pointer = 0,
				.jump_bw = JMP_FW,
				.jump_rel = JMP_ABS,
				.operation = EQT,
				.next_node = MAX_NODES,
				.next_branch = 0,
				.data = 0,
				.mask = 0xffff
			}
		}
	}
};
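
/* Write the decision nodes, then the key nodes, into consecutive
 * PTREE_RAM slots starting at ptree->start_node.
 */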
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
	int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* continue node index for key node */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}
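
/* Program the default (no-match) classification result for each
 * parser: stage the six result words at DFCLSRESDB00 and point
 * DFCLSRESDBPTR0 at the given database index with the given priority.
 */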
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL)
			offset = enet_cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}
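
/* Sideband entries are 16 bits each, packed two per word and written
 * into PKT_RAM past the packet-byte area (starting at word index
 * CLE_PKTRAM_SIZE / 4). hdr_len carries the MAC header length in the
 * upper bits and the IPv4 IHL (in words) in the low five bits.
 */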
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPv4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}
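
/* Seed the RSS hash with a random 16-byte secret key. */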
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4]; /* 16 Bytes */
	int ret = 0;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);

	return ret;
}
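
/* Fill the RSS indirection table, spreading entries round-robin
 * across the rx rings. Each entry carries the destination queue id
 * plus the buffer-pool (and optional page-pool) free-pool selectors.
 */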
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_get_fpsel(pool_id);
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		if (pdata->rx_ring[idx]->page_pool) {
			pool_id = pdata->rx_ring[idx]->page_pool->id;
			nfpsel = xgene_enet_get_fpsel(pool_id);
		}

		idt_reg = 0;
		xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}
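
/* Enable RSS on every active parser (bit 0 of RSS_CTRL0 is the enable
 * bit, with the hash variant, RSS_IPV4_12B here, above it), then set
 * up the sideband data and the indirection table.
 */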
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val = 0;
	int i, ret = 0;

	offset = CLE_PORT_OFFSET;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		/* enable RSS */
		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}
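
/* Classifier init entry point, wired up through xgene_cle3in_ops.
 * Only the XGMII (10G) mode uses this classifier: set up RSS, the
 * default accept/drop result pointers and a single key node, then
 * program and enable the patricia tree.
 */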
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		return -EINVAL;

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */

	ret = xgene_cle_setup_rss(pdata);
	if (ret) {
		netdev_err(pdata->ndev, "RSS initialization failed\n");
		return ret;
	}

	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_get_fpsel(pool_id);
	def_nxtfpsel = 0;
	if (pdata->rx_ring[0]->page_pool) {
		pool_id = pdata->rx_ring[0]->page_pool->id;
		def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
	}

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
	dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
	dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}

const struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};