qed_init_fw_funcs.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

enum cminterface {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};

/* general constants */
#define QM_PQ_ELEMENT_SIZE		4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size)		(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
							0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff
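
/* Worked example (illustrative values, not from the driver): for a PQ with
 * 1024 CIDs, QM_PQ_MEM_4KB(1024) = DIV_ROUND_UP(1025 * 4, 0x1000) = 2, i.e.
 * two 4KB pages of PQ memory, while QM_PQ_SIZE_256B(1024) =
 * DIV_ROUND_UP(1024, 0x100) - 1 = 3 (the size register is 0-based).
 */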
/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		6250000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		4375000
#define QM_WFQ_INIT_CRD(inc_val)	(2 * (inc_val))
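
/* Worked example (illustrative): a WFQ weight of 1 yields an increment of
 * 0x9000 (36864) credits, and an initial credit of twice that. Since the
 * WFQ init functions below reject increments above QM_WFQ_MAX_INC_VAL
 * (4375000), the largest weight that passes validation is
 * 4375000 / 36864 = 118.
 */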
/* RL constants */
#define QM_RL_UPPER_BOUND		6250000
#define QM_RL_PERIOD			5 /* in us */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_INC_VAL(rate)		max_t(u32, \
					      (((rate ? rate : 1000000) * \
						QM_RL_PERIOD) / 8), 1)
#define QM_RL_MAX_INC_VAL		4375000
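
/* Worked example (illustrative, assuming the rate is given in Mb/s): a
 * 10000 Mb/s limit gives QM_RL_INC_VAL(10000) = 10000 * 5 / 8 = 6250 byte
 * credits per 5 us period; a rate of 0 falls back to the 1000000 default.
 * Increments above QM_RL_MAX_INC_VAL are rejected by the RL init functions.
 */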
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
#define EAGLE_WORKAROUND_TC		7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_EAGLE_WORKAROUND_LINES	8
#define PBF_CMDQ_LINES_RT_OFFSET(voq)	( \
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
		(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)	( \
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
		(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines)	((((pbf_cmd_lines) - 4) * 2) | \
					 QM_LINE_CRD_REG_SIGN_BIT)
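
/* Worked example (illustrative): for the 150 pure-LB command queue lines,
 * QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT, i.e. a
 * line credit of 292 with the sign bit set, as expected by the QM credit
 * registers.
 */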
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS		38
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_EAGLE_WORKAROUND_BLOCKS	4
#define BTB_PURE_LB_FACTOR		10
#define BTB_PURE_LB_RATIO		7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value)	\
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET],	\
		  cmd ## _ ## field,			\
		  value)
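
/* Expansion example (illustrative): QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
 * GROUP_ID, 3) expands to SET_FIELD(cmd_arr[QM_STOP_CMD_GROUP_ID_OFFSET],
 * QM_STOP_CMD_GROUP_ID, 3), writing 3 into bits 16..19 of cmd_arr[1] per
 * the OFFSET/SHIFT/MASK constants above. Likewise,
 * QM_CMD_STRUCT_SIZE(QM_STOP_CMD) expands to QM_STOP_CMD_STRUCT_SIZE (2).
 */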
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port)	((port) *		\
						 (max_phy_tcs_pr_port) + \
						 (tc))
#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port)	\
	((tc) < LB_TC ? PHYS_VOQ(port,		\
				 tc,		\
				 max_phy_tcs_pr_port) : LB_VOQ(port))
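
/* Worked example (illustrative, assuming 4 physical TCs per port): for
 * port 1, TC 2, VOQ(1, 2, 4) = PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while the
 * pure-LB TC maps past the physical range to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
 */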
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
			     bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
			      bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
				bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
				 bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq,
				       u16 cmdq_lines)
{
	u32 qm_line_crd;
	/* In A0 - limit the size of the pbf queue so that only 511 commands
	 * with the minimum size of 4 (FCoE minimum size) can be received.
	 */
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);

	if (is_bb_a0)
		cmdq_lines = min_t(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			u8 phys_tcs = port_params[port_id].num_active_phys_tcs;

			/* find #lines to divide between the active
			 * physical TCs.
			 */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			phys_lines_per_tc = phys_lines / phys_tcs;
			/* init registers per active TC */
			for (tc = 0; tc < phys_tcs; tc++) {
				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}
			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
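
/* Worked example (illustrative numbers): an active port with 3400 PBF
 * command lines and 4 active physical TCs first reserves the 150 pure-LB
 * lines, leaving (3400 - 150) / 4 = 812 lines for each physical TC's VOQ.
 */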
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * active ports
 */
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;
		u8 phys_tcs;

		if (!port_params[port_id].active)
			continue;

		phys_tcs = port_params[port_id].num_active_phys_tcs;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* find blocks per physical TC. use factor to avoid
		 * floating point arithmetic.
		 */
		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (phys_tcs * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;

		/* init physical TCs */
		for (tc = 0; tc < phys_tcs; tc++) {
			voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}

		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}
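
/* Worked example (illustrative numbers): with 1038 BTB blocks on a port and
 * 4 physical TCs, usable_blocks = 1038 - 38 = 1000. The scaled division
 * gives pure_lb_blocks = (1000 * 10) / (4 * 10 + 7) = 212, then
 * max(38, 212 / 10) = 38 blocks for pure LB, leaving
 * (1000 - 38) / 4 = 240 guaranteed blocks per physical TC.
 */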
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  is_vf_pq ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  is_vf_pq ? p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
				(1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			if (is_bb_a0) {
				u32 curr_mask = 0, addr;

				addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
				if (!p_params->is_first_pf)
					curr_mask = qed_rd(p_hwfn, p_ptt,
							   addr);

				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;

				STORE_RT_REG(p_hwfn, addr,
					     curr_mask | tx_pq_vf_mask[i]);
			} else {
				u32 addr;

				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
				STORE_RT_REG(p_hwfn, addr,
					     tx_pq_vf_mask[i]);
			}
		}
	}
}
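
/* Illustrative example of the VF mask bookkeeping above, assuming the
 * 32-bit A0 mask width: a VF PQ with pq_id 70 sets bit 70 % 32 = 6 in
 * tx_pq_vf_mask[70 / 32] = tx_pq_vf_mask[2], and that word is later written
 * to the corresponding MAXPQSIZETXSEL size select register.
 */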
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids,
				     u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
				 (p_params->pf_id % MAX_NUM_PFS_BB);

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_INIT_CRD(inc_val) |
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
			     u8 pf_id,
			     u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}
	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0;
	     i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* check if timed out while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr,
			    u32 cmd_data_lsb,
			    u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids,
		       u16 num_pf_pqs,
		       u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
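
/* Worked example (illustrative numbers): for num_pf_cids = 1024,
 * num_vf_cids = 256, num_tids = 512, num_pf_pqs = 16 and num_vf_pqs = 8:
 * QM_PQ_MEM_4KB(1024) = 2, QM_PQ_MEM_4KB(256) = 1 and
 * QM_PQ_MEM_4KB(1536) = 2, so the PF needs
 * 2 * 16 + 1 * 8 + 2 * 4 = 48 4KB pages (192 KB) of PQ memory.
 */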
int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);

	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u8 pf_id,
		   u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}
	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 vport_id,
		      u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
		return -1;
	}
	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq,
			  u16 start_pq,
			  u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
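
/* Usage sketch (hypothetical call, for illustration only): pausing Tx PQs
 * 0..63 issues two stop commands, one per 32-PQ group, each carrying a full
 * pause mask; a second call with is_release_cmd = true and the same range
 * releases them with an all-zero mask:
 *
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64);
 *	...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64);
 */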