qed_init_fw_funcs.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

enum cminterface {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};

/* general constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff
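
/* Worked example (a sketch, assuming QM_PQ_ELEMENT_SIZE from qed_hsi.h
 * is 4 bytes): a PQ sized for 100 CIDs needs (100 + 1) * 4 = 404 bytes,
 * so QM_PQ_MEM_4KB(100) = 1 page of 4KB, and QM_PQ_SIZE_256B(100) =
 * DIV_ROUND_UP(100, 256) - 1 = 0, i.e. sizes are encoded in 256B units
 * minus one.
 */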

/* feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1

/* other PQ constants */
#define QM_OTHER_PQS_PER_PF	4

/* WFQ constants */
#define QM_WFQ_UPPER_BOUND	62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT	0
#define QM_WFQ_VP_PQ_PF_SHIFT	5
#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL	43750000
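
/* Worked example: each WFQ weight unit contributes 0x9000 (36864) to
 * the increment value, so QM_WFQ_INC_VAL(100) = 3686400, well under
 * QM_WFQ_MAX_INC_VAL (43750000). The init functions below reject a
 * weight of 0 or anything above ~1186.
 */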

/* RL constants */
#define QM_RL_UPPER_BOUND	62500000
#define QM_RL_PERIOD		5 /* in us */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL	43750000
#define QM_RL_INC_VAL(rate)	max_t(u32,				\
				      (u32)(((rate ? rate : 1000000) *	\
					     QM_RL_PERIOD * 101) /	\
					    (8 * 100)), 1)
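
/* Worked example (a sketch, assuming the rate is given in Mbps, as the
 * qed QM interface uses elsewhere): for rate = 10000 (10 Gbps), the
 * credit added per 5 us period is 10000 * 5 * 101 / 800 = 6312 bytes,
 * i.e. the exact byte budget for one period (6250 bytes) plus ~1%
 * headroom. A rate of 0 falls back to 1000000 Mbps.
 */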

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1

/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES	150
#define PBF_CMDQ_LINES_RT_OFFSET(voq)	(				\
	PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq *		\
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -			\
	 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)	(			\
	PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq *			\
	(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -			\
	 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines)	((((pbf_cmd_lines) - 4) * 2) |	\
					 QM_LINE_CRD_REG_SIGN_BIT)

/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS	38
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10
#define BTB_PURE_LB_RATIO	7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value)	\
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET],	\
		  cmd ## _ ## field,			\
		  value)
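
/* Expansion example: QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
 * pq_mask) token-pastes into
 *	SET_FIELD(cmd_arr[QM_STOP_CMD_PAUSE_MASK_OFFSET],
 *		  QM_STOP_CMD_PAUSE_MASK, pq_mask);
 * the field's _OFFSET constant selects the u32 word of the command
 * buffer, while its _SHIFT/_MASK constants place the value inside it.
 */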

/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port)	\
	((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port)		\
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
			LB_VOQ(port))
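
/* Mapping example: with 4 physical TCs per port, port 1 / TC 2 maps to
 * physical VOQ 1 * 4 + 2 = 6, while the pure loopback TC (tc >= LB_TC)
 * of port 1 maps to LB VOQ MAX_PHYS_VOQS + 1 (LB_TC and MAX_PHYS_VOQS
 * come from the HSI headers).
 */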

/******************** INTERNAL IMPLEMENTATION *********************/

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}
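
/* Credit example: for the fixed pure-LB allocation of 150 command queue
 * lines above, QM_VOQ_LINE_CRD(150) = (150 - 4) * 2 = 292 line credits,
 * OR'd with QM_LINE_CRD_REG_SIGN_BIT to match the sign-bit convention
 * used by the other QM credit registers in this file.
 */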

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;

			/* find #lines to divide between active phys TCs */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}
			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) != 1)
					continue;

				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}
			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
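
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified ports: a jumbo-packet headroom is reserved first, a share
 * is carved out for the pure LB VOQ, and the remainder is split evenly
 * between the active physical TCs.
 */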
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;

		if (!port_params[port_id].active)
			continue;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;
		/* find blocks per physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		}
		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;
		/* init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) != 1)
				continue;

			voq = PHYS_VOQ(port_id, tc,
				       max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}
		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}
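
/* Worked example: with 1000 usable blocks left after the 38-block jumbo
 * headroom and 4 active physical TCs:
 *	pure_lb_blocks = max(38, (1000 * 10 / (4 * 10 + 7)) / 10)
 *		       = max(38, 21) = 38
 * and each physical TC is then guaranteed (1000 - 38) / 4 = 240 blocks
 * of 256B.
 */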

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;
		bool rl_valid = p_params->pq_params[i].rl_valid &&
				(p_params->pq_params[i].vport_id <
				 MAX_QM_GLOBAL_RLS);

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		if (p_params->pq_params[i].rl_valid && !rl_valid)
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT ID for rate limiter configuration");
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg,
			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  rl_valid ? p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
				BIT(pq_id % QM_PF_QUEUE_GROUP_SIZE);
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			u32 addr;

			addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
			STORE_RT_REG(p_hwfn, addr, tx_pq_vf_mask[i]);
		}
	}
}
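
/* Note on the mask: tx_pq_vf_mask accumulates one bit per Tx PQ within
 * each QM_PF_QUEUE_GROUP_SIZE-sized group. Going by the register name,
 * a set bit appears to make QM_REG_MAXPQSIZETXSEL select the VF PQ size
 * (QM_REG_MAXPQSIZE_1) rather than the PF size for that PQ, which is
 * why only non-zero masks are written.
 */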

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}
	return 0;
}
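
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */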
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn,
			  "Invalid VPORT ID for rate limiter configuration");
		return -1;
	}

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
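
/* Worked example (a sketch, again assuming QM_PQ_ELEMENT_SIZE is 4):
 * with num_pf_cids = 2048, num_vf_cids = 1024, num_tids = 0,
 * num_pf_pqs = 4 and num_vf_pqs = 8, each PF PQ needs
 * DIV_ROUND_UP(2049 * 4, 4096) = 3 pages and each VF PQ
 * DIV_ROUND_UP(1025 * 4, 4096) = 2 pages, so the PF consumes
 * 3 * 4 + 2 * 8 + 3 * 4 = 40 pages. Note the result is in 4KB units,
 * not bytes.
 */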

int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);
	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	u8 tc;

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];

		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn, p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
			       inc_val);
	}

	return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (vport_id >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn,
			  "Invalid VPORT ID for rate limiter configuration");
		return -1;
	}

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
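
/* Usage sketch (hypothetical range; real callers in the driver's
 * hw-stop path pass their own PQ ranges): pause Tx PQs 0..63, then
 * release them after the queues drain:
 *
 *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
 *		return false;	// stop (pause) command
 *	...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64);
 *
 * With a 32-bit pause mask, a 64-PQ range is sent as two commands, one
 * per 32-PQ group; a release command sends the same groups with a zero
 * mask.
 */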

static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with GENEVE tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
	       ip_geneve_enable ? 1 : 0);
}

#define T_ETH_PACKET_MATCH_RFS_EVENTID	25
#define PARSER_ETH_CONN_CM_HDR		(0x0)
#define CAM_LINE_SIZE			sizeof(u32)
#define RAM_LINE_SIZE			sizeof(u64)
#define REG_SIZE			sizeof(u32)

void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 pf_id)
{
	union gft_cam_line_union camline;
	struct gft_ram_line ramline;
	u32 *p_ramline, i;

	p_ramline = (u32 *)&ramline;

	/* stop using GFT logic */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
	memset(&camline, 0, sizeof(union gft_cam_line_union));
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       camline.cam_line_mapped.camline);
	memset(&ramline, 0, sizeof(ramline));
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
		u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;

		hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
		qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
	}
}

void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			     u16 pf_id, bool tcp, bool udp,
			     bool ipv4, bool ipv6)
{
	u32 rfs_cm_hdr_event_id, *p_ramline;
	union gft_cam_line_union camline;
	struct gft_ram_line ramline;
	int i;

	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	p_ramline = (u32 *)&ramline;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "set_rfs_mode_enable: must accept at least one of - udp or tcp");

	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

	/* configure registers for RFS mode */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
	camline.cam_line_mapped.camline = 0;

	/* mark CAM line as valid */
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_VALID, 1);
	/* filters are per PF */
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
	if (!(tcp && udp)) {
		SET_FIELD(camline.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
		if (tcp)
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}
	if (!(ipv4 && ipv6)) {
		SET_FIELD(camline.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* write characteristics to CAM */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       camline.cam_line_mapped.camline);
	camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
						 PRS_REG_GFT_CAM +
						 CAM_LINE_SIZE * pf_id);

	/* write line to RAM - compare to filter 4 tuple */
	ramline.low32bits = 0;
	ramline.high32bits = 0;
	SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);

	/* write one register per iteration */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		qed_wr(p_hwfn, p_ptt,
		       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
		       i * REG_SIZE, *(p_ramline + i));

	/* set default profile so that no filter match will happen */
	ramline.low32bits = 0xffff;
	ramline.high32bits = 0xffff;
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		qed_wr(p_hwfn, p_ptt,
		       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		       PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
		       *(p_ramline + i));
}