qed_init_fw_funcs.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

enum cminterface {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};

/* general constants */
#define QM_PQ_MEM_4KB(pq_size) \
	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
	(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff

/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1

/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4

/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		43750000

/* RL constants */
#define QM_RL_UPPER_BOUND		62500000
#define QM_RL_PERIOD			5 /* in us */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL		43750000
#define QM_RL_INC_VAL(rate) \
	max_t(u32, (u32)(((rate ? rate : 1000000) * QM_RL_PERIOD * 101) / \
			 (8 * 100)), 1)

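/* A note on QM_RL_INC_VAL, assuming the rate is given in Mb/s (which
 * matches the pf_rl/vport_rl values the callers below pass in):
 * rate * QM_RL_PERIOD is the number of bits that may pass in one 5 us
 * period, dividing by 8 converts bits to bytes, and the 101/100 factor
 * appears to add ~1% headroom so the limiter does not undershoot the
 * requested rate. For example:
 *
 *	QM_RL_INC_VAL(10000) = (10000 * 5 * 101) / 800 = 6312 bytes/period
 *
 * A rate of 0 falls back to 1000000 Mb/s, i.e. effectively unlimited.
 */
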
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1

/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS		38
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10
#define BTB_PURE_LB_RATIO		7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)		cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], cmd ## _ ## field, value)

/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
	((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port)			(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
	 LB_VOQ(port))

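/* A small worked example of the VOQ mapping (illustrative numbers, not
 * taken from the hardware headers): with max_phys_tcs_per_port = 4,
 * physical TC 2 on port 1 maps to VOQ 1 * 4 + 2 = 6, while the pure-LB
 * TC of any port maps past the physical range, to VOQ
 * MAX_PHYS_VOQS + port.
 */
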
/******************** INTERNAL IMPLEMENTATION *********************/

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;
	/* In A0 - limit the size of the pbf queue so that only 511 commands
	 * with the minimum size of 4 (the FCoE minimum size) can be stored
	 * at a time.
	 */
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);

	if (is_bb_a0)
		cmdq_lines = min_t(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

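/* For reference, QM_VOQ_LINE_CRD turns the line count into the initial
 * credit written above: (cmdq_lines - 4) * 2, OR'd with
 * QM_LINE_CRD_REG_SIGN_BIT (presumably the sign convention the credit
 * registers expect). With the A0 cap of 1022 lines that is
 * (1022 - 4) * 2 = 2036 line credits.
 */
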
/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;

			/* find #lines to divide between active phys TCs */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}
			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) != 1)
					continue;

				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}
			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}

static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;

		if (!port_params[port_id].active)
			continue;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;
		/* find blocks per physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		}
		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;
		/* init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) != 1)
				continue;

			voq = PHYS_VOQ(port_id, tc,
				       max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}
		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}

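/* Note on the arithmetic above: BTB_PURE_LB_FACTOR acts as a fixed-point
 * scale that keeps the pure-LB share computation in integer math, and the
 * max_t() clamp guarantees the pure-LB VOQ always has room for at least
 * one jumbo packet (BTB_JUMBO_PKT_BLOCKS = 38 blocks * 256B = 9728 bytes).
 */
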
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  p_params->pq_params[i].rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  p_params->pq_params[i].rl_valid ?
			  p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
				(1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			u32 addr;

			addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
			STORE_RT_REG(p_hwfn, addr, tx_pq_vf_mask[i]);
		}
	}
}

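/* The mem_addr_4kb bookkeeping above lays the PQs out back to back in the
 * PF's PQ memory: each PF PQ advances the cursor by pq_mem_4kb and each
 * VF PQ by vport_pq_mem_4kb, which is why qed_qm_pf_mem_size() below can
 * compute the total allocation as a simple sum of the same 4KB units.
 */
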
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
				 (p_params->pf_id % MAX_NUM_PFS_BB);
	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}
	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}
		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}
	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}
	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}
	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

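/* The pair of functions above implements a simple doorbell handshake with
 * the QM's SDM: wait until the ready register reads non-zero, latch the
 * command address and 64 bits of command data, pulse the GO register, and
 * poll ready again so the caller knows the command was consumed.
 */
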
/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

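/* A rough worked example, assuming QM_PQ_ELEMENT_SIZE is 4 bytes (the
 * constant comes from the FW headers): QM_PQ_MEM_4KB(2048) =
 * DIV_ROUND_UP(2049 * 4, 4096) = 3 pages, so a PF with 2048 CIDs and
 * 16 PF PQs would need 48 pages of 4KB for its PF PQs alone, plus the
 * VF-PQ and Other-PQ terms. Callers size the PF's PQ memory from this
 * total.
 */
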
int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}

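/* Typical usage (a sketch, not lifted from a particular caller): run
 * qed_qm_common_rt_init() once per engine with the port-level parameters,
 * then qed_qm_pf_rt_init() below once per PF with that PF's PQ/VPORT
 * layout. As the *_RT_OFFSET names suggest, both functions only stage
 * runtime-register values; the actual chip writes happen later, when the
 * runtime init array is applied.
 */
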
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);
	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}
	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	u8 tc;

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];

		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn, p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
			       inc_val);
	}
	return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}
	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;

			pq_mask = 0;
		}
	}
	return true;
}

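/* A minimal usage sketch (hypothetical PQ range, not from a real caller):
 * pause 64 Tx PQs starting at PQ 0, then release them later:
 *
 *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
 *		return -EBUSY;	// QM SDM command timed out
 *	// ... drain or reconfigure the queues ...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64);
 *
 * For a release command the PQ mask stays zero, so each 32-PQ group
 * covering [start_pq, start_pq + num_pqs) is simply unpaused.
 */
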
static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

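/* The same UDP destination port is presumably written to all three blocks
 * (PRS, NIG, PBF) so that the parser and both traffic directions agree on
 * what counts as a VXLAN packet; qed_set_geneve_dest_port() below repeats
 * the pattern for the GENEVE (NGE) registers.
 */
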
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* comp ver */
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);

	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
	       ip_geneve_enable ? 1 : 0);
}