  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and /or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/types.h>
  33. #include <linux/crc8.h>
  34. #include <linux/delay.h>
  35. #include <linux/kernel.h>
  36. #include <linux/slab.h>
  37. #include <linux/string.h>
  38. #include "qed_hsi.h"
  39. #include "qed_hw.h"
  40. #include "qed_init_ops.h"
  41. #include "qed_reg_addr.h"
  42. #define CDU_VALIDATION_DEFAULT_CFG 61
  43. static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
  44. {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
  45. {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
  46. {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
  47. };
  48. static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
  49. {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
  50. };
  51. /* General constants */
  52. #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
  53. QM_PQ_ELEMENT_SIZE, \
  54. 0x1000) : 0)
  55. #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
  56. 0x100) - 1 : 0)
  57. #define QM_INVALID_PQ_ID 0xffff
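/* Worked example (illustrative; QM_PQ_ELEMENT_SIZE is defined elsewhere in
 * the driver headers, a value of 4 bytes is assumed here purely for the
 * arithmetic):
 *
 *   QM_PQ_MEM_4KB(2048)   = DIV_ROUND_UP((2048 + 1) * 4, 0x1000) = 3 pages
 *   QM_PQ_SIZE_256B(2048) = DIV_ROUND_UP(2048, 0x100) - 1        = 7
 *
 * i.e. memory is sized in whole 4KB pages, while the size value appears to
 * be encoded as "number of 256B units minus one"; a pq_size of 0 yields 0
 * for both macros.
 */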
  58. /* Feature enable */
  59. #define QM_BYPASS_EN 1
  60. #define QM_BYTE_CRD_EN 1
  61. /* Other PQ constants */
  62. #define QM_OTHER_PQS_PER_PF 4
  63. /* WFQ constants */
  64. /* Upper bound in bytes: 10 * burst size of 1ms at 50Gbps */
  65. #define QM_WFQ_UPPER_BOUND 62500000
  66. /* Bit of VOQ in WFQ VP PQ map */
  67. #define QM_WFQ_VP_PQ_VOQ_SHIFT 0
  68. /* Bit of PF in WFQ VP PQ map */
  69. #define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
  70. /* 0x9000 = 4*9*1024 */
  71. #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
  72. /* Max WFQ increment value is 0.7 * upper bound */
  73. #define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
  74. /* RL constants */
  75. /* Period in us */
  76. #define QM_RL_PERIOD 5
  77. /* Period in 25MHz cycles */
  78. #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
  79. /* RL increment value - rate is specified in mbps */
  80. #define QM_RL_INC_VAL(rate) ({ \
  81. typeof(rate) __rate = (rate); \
  82. max_t(u32, \
  83. (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
  84. (8 * 100)), \
  85. 1); })
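/* Worked example (illustrative): with the rate given in Mbps and
 * QM_RL_PERIOD = 5 us, the expression above amounts to "bytes sent in one
 * RL period, plus a ~1% margin". For a 25 Gbps rate:
 *
 *   QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / (8 * 100) = 15781
 *
 * versus the 15625 bytes that 25 Gbps transmits in 5 us. A rate of 0 is
 * substituted with 1000000 before the calculation.
 */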
  86. /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
  87. #define QM_PF_RL_UPPER_BOUND 62500000
  88. /* Max PF RL increment value is 0.7 * upper bound */
  89. #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
  91. /* Vport RL Upper bound, link speed is in Mbps */
  91. #define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
  92. QM_RL_INC_VAL(speed), \
  93. 9700 + 1000))
  94. /* Max Vport RL increment value is the Vport RL upper bound */
  95. #define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
  96. /* Vport RL credit threshold in case of QM bypass */
  97. #define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
  98. /* AFullOprtnstcCrdMask constants */
  99. #define QM_OPPOR_LINE_VOQ_DEF 1
  100. #define QM_OPPOR_FW_STOP_DEF 0
  101. #define QM_OPPOR_PQ_EMPTY_DEF 1
  102. /* Command Queue constants */
  103. /* Pure LB CmdQ lines (+spare) */
  104. #define PBF_CMDQ_PURE_LB_LINES 150
  105. #define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
  106. #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
  107. (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
  108. (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
  109. PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
  110. #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
  111. (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
  112. (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
  113. PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
  114. #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
  115. ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
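/* Worked example (illustrative): the credit value keeps 4 command lines in
 * reserve and doubles the remainder before OR-ing in the credit register
 * sign bit (QM_LINE_CRD_REG_SIGN_BIT, defined elsewhere). For the pure LB
 * allocation above:
 *
 *   QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | sign bit = 292 | sign bit
 */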
  116. /* BTB: blocks constants (block size = 256B) */
  117. /* 256B blocks in 9700B packet */
  118. #define BTB_JUMBO_PKT_BLOCKS 38
  119. /* Headroom per-port */
  120. #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
  121. #define BTB_PURE_LB_FACTOR 10
  122. /* Factored (hence really 0.7) */
  123. #define BTB_PURE_LB_RATIO 7
  124. /* QM stop command constants */
  125. #define QM_STOP_PQ_MASK_WIDTH 32
  126. #define QM_STOP_CMD_ADDR 2
  127. #define QM_STOP_CMD_STRUCT_SIZE 2
  128. #define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
  129. #define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
  130. #define QM_STOP_CMD_PAUSE_MASK_MASK -1
  131. #define QM_STOP_CMD_GROUP_ID_OFFSET 1
  132. #define QM_STOP_CMD_GROUP_ID_SHIFT 16
  133. #define QM_STOP_CMD_GROUP_ID_MASK 15
  134. #define QM_STOP_CMD_PQ_TYPE_OFFSET 1
  135. #define QM_STOP_CMD_PQ_TYPE_SHIFT 24
  136. #define QM_STOP_CMD_PQ_TYPE_MASK 1
  137. #define QM_STOP_CMD_MAX_POLL_COUNT 100
  138. #define QM_STOP_CMD_POLL_PERIOD_US 500
  139. /* QM command macros */
  140. #define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
  141. #define QM_CMD_SET_FIELD(var, cmd, field, value) \
  142. SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
  143. cmd ## _ ## field, \
  144. value)
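/* Worked example (illustrative): assuming the usual SET_FIELD(value, name,
 * val) helper driven by the name ## _MASK / name ## _SHIFT pair, a call
 * such as
 *
 *   QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, 3);
 *
 * expands to SET_FIELD(cmd_arr[QM_STOP_CMD_GROUP_ID_OFFSET],
 * QM_STOP_CMD_GROUP_ID, 3), i.e. it writes 3 into the 4-bit field at shift
 * 16 (bits [19:16]) of the second command dword.
 */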
  145. #define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
  146. ext_voq, wrr) \
  147. do { \
  148. typeof(map) __map; \
  149. memset(&__map, 0, sizeof(__map)); \
  150. SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
  151. SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
  152. rl_valid); \
  153. SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
  154. vp_pq_id); \
  155. SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
  156. SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
  157. SET_FIELD(__map.reg, \
  158. QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
  159. STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
  160. *((u32 *)&__map)); \
  161. (map) = __map; \
  162. } while (0)
  163. #define WRITE_PQ_INFO_TO_RAM 1
  164. #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
  165. (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
  166. ((rl_valid) << 22) | ((rl) << 24))
  167. #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
  168. (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
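/* Layout note (inferred from the shifts above, not from a documented
 * format; field widths are a best guess based on the gaps between shifts):
 *
 *   bits [11:0]  vp       - first Tx PQ of the VPORT/TC
 *   bits [15:12] pf
 *   bits [19:16] tc
 *   bits [21:20] port
 *   bit  22      rl_valid
 *   bits [31:24] rl       - vport_id when rl_valid is set
 *
 * One such 32-bit word per PQ is written at offset 21776 of the XSEM
 * internal RAM via PQ_INFO_RAM_GRC_ADDRESS().
 */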
  169. /******************** INTERNAL IMPLEMENTATION *********************/
  170. /* Returns the external VOQ number */
  171. static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
  172. u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
  173. {
  174. if (tc == PURE_LB_TC)
  175. return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
  176. else
  177. return port_id * max_phys_tcs_per_port + tc;
  178. }
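/* Example (illustrative): physical TCs map to a contiguous per-port range,
 * while the pure LB TC of every port gets a dedicated VOQ beyond all the
 * physical ones. With a hypothetical max_phys_tcs_per_port of 4:
 *
 *   port 1, tc 2       -> VOQ 1 * 4 + 2 = 6
 *   port 1, PURE_LB_TC -> VOQ NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1
 */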
  179. /* Prepare PF RL enable/disable runtime init values */
  180. static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
  181. {
  182. STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
  183. if (pf_rl_en) {
  184. u8 num_ext_voqs = MAX_NUM_VOQS_E4;
  185. u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
  186. /* Enable RLs for all VOQs */
  187. STORE_RT_REG(p_hwfn,
  188. QM_REG_RLPFVOQENABLE_RT_OFFSET,
  189. (u32)voq_bit_mask);
  190. if (num_ext_voqs >= 32)
  191. STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
  192. (u32)(voq_bit_mask >> 32));
  193. /* Write RL period */
  194. STORE_RT_REG(p_hwfn,
  195. QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
  196. STORE_RT_REG(p_hwfn,
  197. QM_REG_RLPFPERIODTIMER_RT_OFFSET,
  198. QM_RL_PERIOD_CLK_25M);
  199. /* Set credit threshold for QM bypass flow */
  200. if (QM_BYPASS_EN)
  201. STORE_RT_REG(p_hwfn,
  202. QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
  203. QM_PF_RL_UPPER_BOUND);
  204. }
  205. }
  206. /* Prepare PF WFQ enable/disable runtime init values */
  207. static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
  208. {
  209. STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
  210. /* Set credit threshold for QM bypass flow */
  211. if (pf_wfq_en && QM_BYPASS_EN)
  212. STORE_RT_REG(p_hwfn,
  213. QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
  214. QM_WFQ_UPPER_BOUND);
  215. }
  216. /* Prepare VPORT RL enable/disable runtime init values */
  217. static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
  218. {
  219. STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
  220. vport_rl_en ? 1 : 0);
  221. if (vport_rl_en) {
  222. /* Write RL period (use timer 0 only) */
  223. STORE_RT_REG(p_hwfn,
  224. QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
  225. QM_RL_PERIOD_CLK_25M);
  226. STORE_RT_REG(p_hwfn,
  227. QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
  228. QM_RL_PERIOD_CLK_25M);
  229. /* Set credit threshold for QM bypass flow */
  230. if (QM_BYPASS_EN)
  231. STORE_RT_REG(p_hwfn,
  232. QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
  233. QM_VP_RL_BYPASS_THRESH_SPEED);
  234. }
  235. }
  236. /* Prepare VPORT WFQ enable/disable runtime init values */
  237. static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
  238. {
  239. STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
  240. vport_wfq_en ? 1 : 0);
  241. /* Set credit threshold for QM bypass flow */
  242. if (vport_wfq_en && QM_BYPASS_EN)
  243. STORE_RT_REG(p_hwfn,
  244. QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
  245. QM_WFQ_UPPER_BOUND);
  246. }
  247. /* Prepare runtime init values to allocate PBF command queue lines for
  248. * the specified VOQ.
  249. */
  250. static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
  251. u8 ext_voq, u16 cmdq_lines)
  252. {
  253. u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
  254. OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
  255. (u32)cmdq_lines);
  256. STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
  257. qm_line_crd);
  258. STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
  259. qm_line_crd);
  260. }
  261. /* Prepare runtime init values to allocate PBF command queue lines. */
  262. static void qed_cmdq_lines_rt_init(
  263. struct qed_hwfn *p_hwfn,
  264. u8 max_ports_per_engine,
  265. u8 max_phys_tcs_per_port,
  266. struct init_qm_port_params port_params[MAX_NUM_PORTS])
  267. {
  268. u8 tc, ext_voq, port_id, num_tcs_in_port;
  269. u8 num_ext_voqs = MAX_NUM_VOQS_E4;
  270. /* Clear PBF lines of all VOQs */
  271. for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
  272. STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
  273. for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
  274. u16 phys_lines, phys_lines_per_tc;
  275. if (!port_params[port_id].active)
  276. continue;
  277. /* Find number of command queue lines to divide between the
  278. * active physical TCs. In E5, 1/8 of the lines are reserved.
  279. * The lines for pure LB TC are subtracted.
  280. */
  281. phys_lines = port_params[port_id].num_pbf_cmd_lines;
  282. phys_lines -= PBF_CMDQ_PURE_LB_LINES;
  283. /* Find #lines per active physical TC */
  284. num_tcs_in_port = 0;
  285. for (tc = 0; tc < max_phys_tcs_per_port; tc++)
  286. if (((port_params[port_id].active_phys_tcs >>
  287. tc) & 0x1) == 1)
  288. num_tcs_in_port++;
  289. phys_lines_per_tc = phys_lines / num_tcs_in_port;
  290. /* Init registers per active TC */
  291. for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
  292. ext_voq = qed_get_ext_voq(p_hwfn,
  293. port_id,
  294. tc, max_phys_tcs_per_port);
  295. if (((port_params[port_id].active_phys_tcs >>
  296. tc) & 0x1) == 1)
  297. qed_cmdq_lines_voq_rt_init(p_hwfn,
  298. ext_voq,
  299. phys_lines_per_tc);
  300. }
  301. /* Init registers for pure LB TC */
  302. ext_voq = qed_get_ext_voq(p_hwfn,
  303. port_id,
  304. PURE_LB_TC, max_phys_tcs_per_port);
  305. qed_cmdq_lines_voq_rt_init(p_hwfn,
  306. ext_voq, PBF_CMDQ_PURE_LB_LINES);
  307. }
  308. }
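/* Worked example (illustrative values): for an active port with
 * num_pbf_cmd_lines = 3440 and two active physical TCs, the loop above
 * allocates
 *
 *   phys_lines        = 3440 - 150 = 3290
 *   phys_lines_per_tc = 3290 / 2   = 1645 lines per active TC
 *
 * plus the fixed PBF_CMDQ_PURE_LB_LINES = 150 for the port's pure LB VOQ;
 * each allocation also programs the matching QM line credit through
 * qed_cmdq_lines_voq_rt_init().
 */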
  309. static void qed_btb_blocks_rt_init(
  310. struct qed_hwfn *p_hwfn,
  311. u8 max_ports_per_engine,
  312. u8 max_phys_tcs_per_port,
  313. struct init_qm_port_params port_params[MAX_NUM_PORTS])
  314. {
  315. u32 usable_blocks, pure_lb_blocks, phys_blocks;
  316. u8 tc, ext_voq, port_id, num_tcs_in_port;
  317. for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
  318. if (!port_params[port_id].active)
  319. continue;
  320. /* Subtract headroom blocks */
  321. usable_blocks = port_params[port_id].num_btb_blocks -
  322. BTB_HEADROOM_BLOCKS;
  323. /* Find blocks per physical TC. Use factor to avoid floating
  324. * point arithmetic.
  325. */
  326. num_tcs_in_port = 0;
  327. for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
  328. if (((port_params[port_id].active_phys_tcs >>
  329. tc) & 0x1) == 1)
  330. num_tcs_in_port++;
  331. pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
  332. (num_tcs_in_port * BTB_PURE_LB_FACTOR +
  333. BTB_PURE_LB_RATIO);
  334. pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
  335. pure_lb_blocks / BTB_PURE_LB_FACTOR);
  336. phys_blocks = (usable_blocks - pure_lb_blocks) /
  337. num_tcs_in_port;
  338. /* Init physical TCs */
  339. for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
  340. if (((port_params[port_id].active_phys_tcs >>
  341. tc) & 0x1) == 1) {
  342. ext_voq =
  343. qed_get_ext_voq(p_hwfn,
  344. port_id,
  345. tc,
  346. max_phys_tcs_per_port);
  347. STORE_RT_REG(p_hwfn,
  348. PBF_BTB_GUARANTEED_RT_OFFSET
  349. (ext_voq), phys_blocks);
  350. }
  351. }
  352. /* Init pure LB TC */
  353. ext_voq = qed_get_ext_voq(p_hwfn,
  354. port_id,
  355. PURE_LB_TC, max_phys_tcs_per_port);
  356. STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
  357. pure_lb_blocks);
  358. }
  359. }
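/* Worked example (illustrative values): the FACTOR/RATIO pair implements
 * the 0.7 pure LB weighting in integer arithmetic. For a port with 4
 * active TCs and usable_blocks = 4700:
 *
 *   pure_lb_blocks = (4700 * 10) / (4 * 10 + 7) = 1000
 *   pure_lb_blocks = max(38, 1000 / 10)         = 100
 *   phys_blocks    = (4700 - 100) / 4           = 1150 blocks per TC
 *
 * with BTB_JUMBO_PKT_BLOCKS = 38 acting as a floor so that a jumbo packet
 * always fits in the pure LB allocation.
 */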
  360. /* Prepare Tx PQ mapping runtime init values for the specified PF */
  361. static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
  362. struct qed_ptt *p_ptt,
  363. struct qed_qm_pf_rt_init_params *p_params,
  364. u32 base_mem_addr_4kb)
  365. {
  366. u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
  367. struct init_qm_vport_params *vport_params = p_params->vport_params;
  368. u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
  369. u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
  370. struct init_qm_pq_params *pq_params = p_params->pq_params;
  371. u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
  372. num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
  373. first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
  374. last_pq_group = (p_params->start_pq + num_pqs - 1) /
  375. QM_PF_QUEUE_GROUP_SIZE;
  376. pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
  377. vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
  378. mem_addr_4kb = base_mem_addr_4kb;
  379. /* Set mapping from PQ group to PF */
  380. for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
  381. STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
  382. (u32)(p_params->pf_id));
  383. /* Set PQ sizes */
  384. STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
  385. QM_PQ_SIZE_256B(p_params->num_pf_cids));
  386. STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
  387. QM_PQ_SIZE_256B(p_params->num_vf_cids));
  388. /* Go over all Tx PQs */
  389. for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
  390. u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
  391. u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
  392. struct qm_rf_pq_map_e4 tx_pq_map;
  393. bool is_vf_pq, rl_valid;
  394. u16 *p_first_tx_pq_id;
  395. ext_voq = qed_get_ext_voq(p_hwfn,
  396. pq_params[i].port_id,
  397. tc_id,
  398. p_params->max_phys_tcs_per_port);
  399. is_vf_pq = (i >= p_params->num_pf_pqs);
  400. rl_valid = pq_params[i].rl_valid > 0;
  401. /* Update first Tx PQ of VPORT/TC */
  402. vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
  403. p_first_tx_pq_id =
  404. &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
  405. if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
  406. u32 map_val =
  407. (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
  408. (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
  409. /* Create new VP PQ */
  410. *p_first_tx_pq_id = pq_id;
  411. /* Map VP PQ to VOQ and PF */
  412. STORE_RT_REG(p_hwfn,
  413. QM_REG_WFQVPMAP_RT_OFFSET +
  414. *p_first_tx_pq_id,
  415. map_val);
  416. }
  417. /* Check RL ID */
  418. if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
  419. DP_NOTICE(p_hwfn,
  420. "Invalid VPORT ID for rate limiter configuration\n");
  421. rl_valid = false;
  422. }
  423. /* Prepare PQ map entry */
  424. QM_INIT_TX_PQ_MAP(p_hwfn,
  425. tx_pq_map,
  426. E4,
  427. pq_id,
  428. rl_valid ? 1 : 0,
  429. *p_first_tx_pq_id,
  430. rl_valid ? pq_params[i].vport_id : 0,
  431. ext_voq, pq_params[i].wrr_group);
  432. /* Set PQ base address */
  433. STORE_RT_REG(p_hwfn,
  434. QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
  435. mem_addr_4kb);
  436. /* Clear PQ pointer table entry (64 bit) */
  437. if (p_params->is_pf_loading)
  438. for (j = 0; j < 2; j++)
  439. STORE_RT_REG(p_hwfn,
  440. QM_REG_PTRTBLTX_RT_OFFSET +
  441. (pq_id * 2) + j, 0);
  442. /* Write PQ info to RAM */
  443. if (WRITE_PQ_INFO_TO_RAM != 0) {
  444. u32 pq_info = 0;
  445. pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
  446. p_params->pf_id,
  447. tc_id,
  448. pq_params[i].port_id,
  449. rl_valid ? 1 : 0,
  450. rl_valid ?
  451. pq_params[i].vport_id : 0);
  452. qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
  453. pq_info);
  454. }
  455. /* If VF PQ, add indication to PQ VF mask */
  456. if (is_vf_pq) {
  457. tx_pq_vf_mask[pq_id /
  458. QM_PF_QUEUE_GROUP_SIZE] |=
  459. BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
  460. mem_addr_4kb += vport_pq_mem_4kb;
  461. } else {
  462. mem_addr_4kb += pq_mem_4kb;
  463. }
  464. }
  465. /* Store Tx PQ VF mask to size select register */
  466. for (i = 0; i < num_tx_pq_vf_masks; i++)
  467. if (tx_pq_vf_mask[i])
  468. STORE_RT_REG(p_hwfn,
  469. QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
  470. tx_pq_vf_mask[i]);
  471. }
  472. /* Prepare Other PQ mapping runtime init values for the specified PF */
  473. static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
  474. u8 pf_id,
  475. bool is_pf_loading,
  476. u32 num_pf_cids,
  477. u32 num_tids, u32 base_mem_addr_4kb)
  478. {
  479. u32 pq_size, pq_mem_4kb, mem_addr_4kb;
  480. u16 i, j, pq_id, pq_group;
  481. /* A single other PQ group is used in each PF, where PQ group i is used
  482. * in PF i.
  483. */
  484. pq_group = pf_id;
  485. pq_size = num_pf_cids + num_tids;
  486. pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
  487. mem_addr_4kb = base_mem_addr_4kb;
  488. /* Map PQ group to PF */
  489. STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
  490. (u32)(pf_id));
  491. /* Set PQ sizes */
  492. STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
  493. QM_PQ_SIZE_256B(pq_size));
  494. for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
  495. i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
  496. /* Set PQ base address */
  497. STORE_RT_REG(p_hwfn,
  498. QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
  499. mem_addr_4kb);
  500. /* Clear PQ pointer table entry */
  501. if (is_pf_loading)
  502. for (j = 0; j < 2; j++)
  503. STORE_RT_REG(p_hwfn,
  504. QM_REG_PTRTBLOTHER_RT_OFFSET +
  505. (pq_id * 2) + j, 0);
  506. mem_addr_4kb += pq_mem_4kb;
  507. }
  508. }
  509. /* Prepare PF WFQ runtime init values for the specified PF.
  510. * Return -1 on error.
  511. */
  512. static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
  513. struct qed_qm_pf_rt_init_params *p_params)
  514. {
  515. u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
  516. struct init_qm_pq_params *pq_params = p_params->pq_params;
  517. u32 inc_val, crd_reg_offset;
  518. u8 ext_voq;
  519. u16 i;
  520. inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
  521. if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
  522. DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
  523. return -1;
  524. }
  525. for (i = 0; i < num_tx_pqs; i++) {
  526. ext_voq = qed_get_ext_voq(p_hwfn,
  527. pq_params[i].port_id,
  528. pq_params[i].tc_id,
  529. p_params->max_phys_tcs_per_port);
  530. crd_reg_offset =
  531. (p_params->pf_id < MAX_NUM_PFS_BB ?
  532. QM_REG_WFQPFCRD_RT_OFFSET :
  533. QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
  534. ext_voq * MAX_NUM_PFS_BB +
  535. (p_params->pf_id % MAX_NUM_PFS_BB);
  536. OVERWRITE_RT_REG(p_hwfn,
  537. crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
  538. }
  539. STORE_RT_REG(p_hwfn,
  540. QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
  541. QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
  542. STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
  543. inc_val);
  544. return 0;
  545. }
  546. /* Prepare PF RL runtime init values for the specified PF.
  547. * Return -1 on error.
  548. */
  549. static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
  550. {
  551. u32 inc_val = QM_RL_INC_VAL(pf_rl);
  552. if (inc_val > QM_PF_RL_MAX_INC_VAL) {
  553. DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
  554. return -1;
  555. }
  556. STORE_RT_REG(p_hwfn,
  557. QM_REG_RLPFCRD_RT_OFFSET + pf_id,
  558. (u32)QM_RL_CRD_REG_SIGN_BIT);
  559. STORE_RT_REG(p_hwfn,
  560. QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
  561. QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
  562. STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
  563. return 0;
  564. }
  565. /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
  566. * Return -1 on error.
  567. */
  568. static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
  569. u8 num_vports,
  570. struct init_qm_vport_params *vport_params)
  571. {
  572. u16 vport_pq_id;
  573. u32 inc_val;
  574. u8 tc, i;
  575. /* Go over all PF VPORTs */
  576. for (i = 0; i < num_vports; i++) {
  577. if (!vport_params[i].vport_wfq)
  578. continue;
  579. inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
  580. if (inc_val > QM_WFQ_MAX_INC_VAL) {
  581. DP_NOTICE(p_hwfn,
  582. "Invalid VPORT WFQ weight configuration\n");
  583. return -1;
  584. }
  585. /* Each VPORT can have several VPORT PQ IDs for various TCs */
  586. for (tc = 0; tc < NUM_OF_TCS; tc++) {
  587. vport_pq_id = vport_params[i].first_tx_pq_id[tc];
  588. if (vport_pq_id != QM_INVALID_PQ_ID) {
  589. STORE_RT_REG(p_hwfn,
  590. QM_REG_WFQVPCRD_RT_OFFSET +
  591. vport_pq_id,
  592. (u32)QM_WFQ_CRD_REG_SIGN_BIT);
  593. STORE_RT_REG(p_hwfn,
  594. QM_REG_WFQVPWEIGHT_RT_OFFSET +
  595. vport_pq_id, inc_val);
  596. }
  597. }
  598. }
  599. return 0;
  600. }
  601. /* Prepare VPORT RL runtime init values for the specified VPORTs.
  602. * Return -1 on error.
  603. */
  604. static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
  605. u8 start_vport,
  606. u8 num_vports,
  607. u32 link_speed,
  608. struct init_qm_vport_params *vport_params)
  609. {
  610. u8 i, vport_id;
  611. u32 inc_val;
  612. if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
  613. DP_NOTICE(p_hwfn,
  614. "Invalid VPORT ID for rate limiter configuration\n");
  615. return -1;
  616. }
  617. /* Go over all PF VPORTs */
  618. for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
  619. inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
  620. vport_params[i].vport_rl :
  621. link_speed);
  622. if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
  623. DP_NOTICE(p_hwfn,
  624. "Invalid VPORT rate-limit configuration\n");
  625. return -1;
  626. }
  627. STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
  628. (u32)QM_RL_CRD_REG_SIGN_BIT);
  629. STORE_RT_REG(p_hwfn,
  630. QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
  631. QM_VP_RL_UPPER_BOUND(link_speed) |
  632. (u32)QM_RL_CRD_REG_SIGN_BIT);
  633. STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
  634. inc_val);
  635. }
  636. return 0;
  637. }
  638. static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
  639. struct qed_ptt *p_ptt)
  640. {
  641. u32 reg_val, i;
  642. for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
  643. i++) {
  644. udelay(QM_STOP_CMD_POLL_PERIOD_US);
  645. reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
  646. }
  647. /* Check if timeout while waiting for SDM command ready */
  648. if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
  649. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  650. "Timeout when waiting for QM SDM command ready signal\n");
  651. return false;
  652. }
  653. return true;
  654. }
  655. static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
  656. struct qed_ptt *p_ptt,
  657. u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
  658. {
  659. if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
  660. return false;
  661. qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
  662. qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
  663. qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
  664. qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
  665. qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
  666. return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
  667. }
  668. /******************** INTERFACE IMPLEMENTATION *********************/
  669. u32 qed_qm_pf_mem_size(u32 num_pf_cids,
  670. u32 num_vf_cids,
  671. u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
  672. {
  673. return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
  674. QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
  675. QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
  676. }
  677. int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
  678. struct qed_qm_common_rt_init_params *p_params)
  679. {
  680. /* Init AFullOprtnstcCrdMask */
  681. u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
  682. QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
  683. (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
  684. (p_params->pf_wfq_en <<
  685. QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
  686. (p_params->vport_wfq_en <<
  687. QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
  688. (p_params->pf_rl_en <<
  689. QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
  690. (p_params->vport_rl_en <<
  691. QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
  692. (QM_OPPOR_FW_STOP_DEF <<
  693. QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
  694. (QM_OPPOR_PQ_EMPTY_DEF <<
  695. QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
  696. STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
  697. /* Enable/disable PF RL */
  698. qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
  699. /* Enable/disable PF WFQ */
  700. qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
  701. /* Enable/disable VPORT RL */
  702. qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
  703. /* Enable/disable VPORT WFQ */
  704. qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
  705. /* Init PBF CMDQ line credit */
  706. qed_cmdq_lines_rt_init(p_hwfn,
  707. p_params->max_ports_per_engine,
  708. p_params->max_phys_tcs_per_port,
  709. p_params->port_params);
  710. /* Init BTB blocks in PBF */
  711. qed_btb_blocks_rt_init(p_hwfn,
  712. p_params->max_ports_per_engine,
  713. p_params->max_phys_tcs_per_port,
  714. p_params->port_params);
  715. return 0;
  716. }
  717. int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
  718. struct qed_ptt *p_ptt,
  719. struct qed_qm_pf_rt_init_params *p_params)
  720. {
  721. struct init_qm_vport_params *vport_params = p_params->vport_params;
  722. u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
  723. p_params->num_tids) *
  724. QM_OTHER_PQS_PER_PF;
  725. u8 tc, i;
  726. /* Clear first Tx PQ ID array for each VPORT */
  727. for (i = 0; i < p_params->num_vports; i++)
  728. for (tc = 0; tc < NUM_OF_TCS; tc++)
  729. vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
  730. /* Map Other PQs (if any) */
  731. qed_other_pq_map_rt_init(p_hwfn,
  732. p_params->pf_id,
  733. p_params->is_pf_loading, p_params->num_pf_cids,
  734. p_params->num_tids, 0);
  735. /* Map Tx PQs */
  736. qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
  737. /* Init PF WFQ */
  738. if (p_params->pf_wfq)
  739. if (qed_pf_wfq_rt_init(p_hwfn, p_params))
  740. return -1;
  741. /* Init PF RL */
  742. if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
  743. return -1;
  744. /* Set VPORT WFQ */
  745. if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
  746. return -1;
  747. /* Set VPORT RL */
  748. if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
  749. p_params->num_vports, p_params->link_speed,
  750. vport_params))
  751. return -1;
  752. return 0;
  753. }
  754. int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
  755. struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
  756. {
  757. u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
  758. if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
  759. DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
  760. return -1;
  761. }
  762. qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
  763. return 0;
  764. }
  765. int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
  766. struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
  767. {
  768. u32 inc_val = QM_RL_INC_VAL(pf_rl);
  769. if (inc_val > QM_PF_RL_MAX_INC_VAL) {
  770. DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
  771. return -1;
  772. }
  773. qed_wr(p_hwfn,
  774. p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
  775. qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
  776. return 0;
  777. }
  778. int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
  779. struct qed_ptt *p_ptt,
  780. u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
  781. {
  782. u16 vport_pq_id;
  783. u32 inc_val;
  784. u8 tc;
  785. inc_val = QM_WFQ_INC_VAL(vport_wfq);
  786. if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
  787. DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
  788. return -1;
  789. }
  790. for (tc = 0; tc < NUM_OF_TCS; tc++) {
  791. vport_pq_id = first_tx_pq_id[tc];
  792. if (vport_pq_id != QM_INVALID_PQ_ID)
  793. qed_wr(p_hwfn,
  794. p_ptt,
  795. QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
  796. }
  797. return 0;
  798. }
  799. int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
  800. struct qed_ptt *p_ptt,
  801. u8 vport_id, u32 vport_rl, u32 link_speed)
  802. {
  803. u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
  804. if (vport_id >= max_qm_global_rls) {
  805. DP_NOTICE(p_hwfn,
  806. "Invalid VPORT ID for rate limiter configuration\n");
  807. return -1;
  808. }
  809. inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
  810. if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
  811. DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
  812. return -1;
  813. }
  814. qed_wr(p_hwfn,
  815. p_ptt,
  816. QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
  817. qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
  818. return 0;
  819. }
  820. bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
  821. struct qed_ptt *p_ptt,
  822. bool is_release_cmd,
  823. bool is_tx_pq, u16 start_pq, u16 num_pqs)
  824. {
  825. u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
  826. u32 pq_mask = 0, last_pq, pq_id;
  827. last_pq = start_pq + num_pqs - 1;
  828. /* Set command's PQ type */
  829. QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
  830. /* Go over requested PQs */
  831. for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
  832. /* Set PQ bit in mask (stop command only) */
  833. if (!is_release_cmd)
  834. pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
  835. /* If last PQ or end of PQ mask, write command */
  836. if ((pq_id == last_pq) ||
  837. (pq_id % QM_STOP_PQ_MASK_WIDTH ==
  838. (QM_STOP_PQ_MASK_WIDTH - 1))) {
  839. QM_CMD_SET_FIELD(cmd_arr,
  840. QM_STOP_CMD, PAUSE_MASK, pq_mask);
  841. QM_CMD_SET_FIELD(cmd_arr,
  842. QM_STOP_CMD,
  843. GROUP_ID,
  844. pq_id / QM_STOP_PQ_MASK_WIDTH);
  845. if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
  846. cmd_arr[0], cmd_arr[1]))
  847. return false;
  848. pq_mask = 0;
  849. }
  850. }
  851. return true;
  852. }
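/* Worked example (illustrative): PQs are grouped into 32-bit masks, one
 * group per QM_STOP_PQ_MASK_WIDTH PQs. A stop command for start_pq = 30,
 * num_pqs = 5 therefore issues two SDM commands:
 *
 *   group 0: pause mask with bits 30-31 set (PQs 30-31)
 *   group 1: pause mask with bits 0-2 set   (PQs 32-34)
 *
 * A release command sends the same sequence with an all-zero pause mask.
 */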
  853. #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
  854. do { \
  855. typeof(var) *__p_var = &(var); \
  856. typeof(offset) __offset = offset; \
  857. *__p_var = (*__p_var & ~BIT(__offset)) | \
  858. ((enable) ? BIT(__offset) : 0); \
  859. } while (0)
  860. #define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
  861. #define PRS_ETH_OUTPUT_FORMAT -46832
  862. void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
  863. struct qed_ptt *p_ptt, u16 dest_port)
  864. {
  865. /* Update PRS register */
  866. qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
  867. /* Update NIG register */
  868. qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
  869. /* Update PBF register */
  870. qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
  871. }
  872. void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
  873. struct qed_ptt *p_ptt, bool vxlan_enable)
  874. {
  875. u32 reg_val;
  876. u8 shift;
  877. /* Update PRS register */
  878. reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
  879. shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
  880. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
  881. qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
  882. if (reg_val) {
  883. reg_val =
  884. qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
  885. /* Update output only if tunnel blocks not included. */
  886. if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
  887. qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
  888. (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
  889. }
  890. /* Update NIG register */
  891. reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
  892. shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
  893. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
  894. qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
  895. /* Update DORQ register */
  896. qed_wr(p_hwfn,
  897. p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
  898. }
  899. void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
  900. struct qed_ptt *p_ptt,
  901. bool eth_gre_enable, bool ip_gre_enable)
  902. {
  903. u32 reg_val;
  904. u8 shift;
  905. /* Update PRS register */
  906. reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
  907. shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
  908. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
  909. shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
  910. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
  911. qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
  912. if (reg_val) {
  913. reg_val =
  914. qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
  915. /* Update output only if tunnel blocks not included. */
  916. if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
  917. qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
  918. (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
  919. }
  920. /* Update NIG register */
  921. reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
  922. shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
  923. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
  924. shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
  925. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
  926. qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
  927. /* Update DORQ registers */
  928. qed_wr(p_hwfn,
  929. p_ptt,
  930. DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
  931. qed_wr(p_hwfn,
  932. p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
  933. }
  934. void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
  935. struct qed_ptt *p_ptt, u16 dest_port)
  936. {
  937. /* Update PRS register */
  938. qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
  939. /* Update NIG register */
  940. qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
  941. /* Update PBF register */
  942. qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
  943. }
  944. void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
  945. struct qed_ptt *p_ptt,
  946. bool eth_geneve_enable, bool ip_geneve_enable)
  947. {
  948. u32 reg_val;
  949. u8 shift;
  950. /* Update PRS register */
  951. reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
  952. shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
  953. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
  954. shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
  955. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
  956. qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
  957. if (reg_val) {
  958. reg_val =
  959. qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
  960. /* Update output only if tunnel blocks not included. */
  961. if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
  962. qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
  963. (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
  964. }
  965. /* Update NIG register */
  966. qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
  967. eth_geneve_enable ? 1 : 0);
  968. qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
  969. /* EDPM with geneve tunnel not supported in BB */
  970. if (QED_IS_BB_B0(p_hwfn->cdev))
  971. return;
  972. /* Update DORQ registers */
  973. qed_wr(p_hwfn,
  974. p_ptt,
  975. DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
  976. eth_geneve_enable ? 1 : 0);
  977. qed_wr(p_hwfn,
  978. p_ptt,
  979. DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
  980. ip_geneve_enable ? 1 : 0);
  981. }
  982. #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
  983. #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
  984. void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
  985. struct qed_ptt *p_ptt, bool enable)
  986. {
  987. u32 reg_val, cfg_mask;
  988. /* read PRS config register */
  989. reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
  990. /* set VXLAN_NO_L2_ENABLE mask */
  991. cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
  992. if (enable) {
  993. /* set VXLAN_NO_L2_ENABLE flag */
  994. reg_val |= cfg_mask;
  995. /* update PRS FIC register */
  996. qed_wr(p_hwfn,
  997. p_ptt,
  998. PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
  999. (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
  1000. } else {
  1001. /* clear VXLAN_NO_L2_ENABLE flag */
  1002. reg_val &= ~cfg_mask;
  1003. }
  1004. /* write PRS config register */
  1005. qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
  1006. }
  1007. #define T_ETH_PACKET_ACTION_GFT_EVENTID 23
  1008. #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
  1009. #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
  1010. #define PARSER_ETH_CONN_CM_HDR 0
  1011. #define CAM_LINE_SIZE sizeof(u32)
  1012. #define RAM_LINE_SIZE sizeof(u64)
  1013. #define REG_SIZE sizeof(u32)
  1014. void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
  1015. {
  1016. /* Disable gft search for PF */
  1017. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
  1018. /* Clean ram & cam for next gft session */
  1019. /* Zero camline */
  1020. qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
  1021. /* Zero ramline */
  1022. qed_wr(p_hwfn,
  1023. p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
  1024. qed_wr(p_hwfn,
  1025. p_ptt,
  1026. PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
  1027. 0);
  1028. }
  1029. void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  1030. {
  1031. u32 rfs_cm_hdr_event_id;
  1032. /* Set RFS event ID to be awakened in Tstorm by PRS */
  1033. rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
  1034. rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
  1035. PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
  1036. rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
  1037. PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
  1038. qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
  1039. }
  1040. void qed_gft_config(struct qed_hwfn *p_hwfn,
  1041. struct qed_ptt *p_ptt,
  1042. u16 pf_id,
  1043. bool tcp,
  1044. bool udp,
  1045. bool ipv4, bool ipv6, enum gft_profile_type profile_type)
  1046. {
  1047. u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
  1048. if (!ipv6 && !ipv4)
  1049. DP_NOTICE(p_hwfn,
  1050. "gft_config: must accept at least on of - ipv4 or ipv6'\n");
  1051. if (!tcp && !udp)
  1052. DP_NOTICE(p_hwfn,
  1053. "gft_config: must accept at least on of - udp or tcp\n");
  1054. if (profile_type >= MAX_GFT_PROFILE_TYPE)
  1055. DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
  1056. /* Set RFS event ID to be awakened in Tstorm by PRS */
  1057. reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
  1058. PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
  1059. reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
  1060. qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
  1061. /* Do not load context only cid in PRS on match. */
  1062. qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
  1063. /* Do not use tenant ID exist bit for gft search */
  1064. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
  1065. /* Set Cam */
  1066. cam_line = 0;
  1067. SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
  1068. /* Filters are per PF!! */
  1069. SET_FIELD(cam_line,
  1070. GFT_CAM_LINE_MAPPED_PF_ID_MASK,
  1071. GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
  1072. SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
  1073. if (!(tcp && udp)) {
  1074. SET_FIELD(cam_line,
  1075. GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
  1076. GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
  1077. if (tcp)
  1078. SET_FIELD(cam_line,
  1079. GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
  1080. GFT_PROFILE_TCP_PROTOCOL);
  1081. else
  1082. SET_FIELD(cam_line,
  1083. GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
  1084. GFT_PROFILE_UDP_PROTOCOL);
  1085. }
  1086. if (!(ipv4 && ipv6)) {
  1087. SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
  1088. if (ipv4)
  1089. SET_FIELD(cam_line,
  1090. GFT_CAM_LINE_MAPPED_IP_VERSION,
  1091. GFT_PROFILE_IPV4);
  1092. else
  1093. SET_FIELD(cam_line,
  1094. GFT_CAM_LINE_MAPPED_IP_VERSION,
  1095. GFT_PROFILE_IPV6);
  1096. }
  1097. /* Write characteristics to cam */
  1098. qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
  1099. cam_line);
  1100. cam_line =
  1101. qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
  1102. /* Write line to RAM - compare to filter 4 tuple */
  1103. ram_line_lo = 0;
  1104. ram_line_hi = 0;
  1105. /* Search no IP as GFT */
  1106. search_non_ip_as_gft = 0;
  1107. /* Tunnel type */
  1108. SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
  1109. SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
  1110. if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
  1111. SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
  1112. SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
  1113. SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
  1114. SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
  1115. SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
  1116. SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
  1117. } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
  1118. SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
  1119. SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
  1120. SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
  1121. } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
  1122. SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
  1123. SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
  1124. } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
  1125. SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
  1126. SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
  1127. } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
  1128. SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
  1129. /* Allow tunneled traffic without inner IP */
  1130. search_non_ip_as_gft = 1;
  1131. }
  1132. qed_wr(p_hwfn,
  1133. p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
  1134. qed_wr(p_hwfn,
  1135. p_ptt,
  1136. PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
  1137. ram_line_lo);
  1138. qed_wr(p_hwfn,
  1139. p_ptt,
  1140. PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
  1141. ram_line_hi);
  1142. /* Set default profile so that no filter match will happen */
  1143. qed_wr(p_hwfn,
  1144. p_ptt,
  1145. PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
  1146. PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
  1147. qed_wr(p_hwfn,
  1148. p_ptt,
  1149. PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
  1150. PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
  1151. /* Enable gft search */
  1152. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
  1153. }
  1154. DECLARE_CRC8_TABLE(cdu_crc8_table);
  1155. /* Calculate and return CDU validation byte per connection type/region/cid */
  1156. static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
  1157. {
  1158. const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
  1159. u8 crc, validation_byte = 0;
  1160. static u8 crc8_table_valid; /* automatically initialized to 0 */
  1161. u32 validation_string = 0;
  1162. u32 data_to_crc;
  1163. if (!crc8_table_valid) {
  1164. crc8_populate_msb(cdu_crc8_table, 0x07);
  1165. crc8_table_valid = 1;
  1166. }
  1167. /* The CRC is calculated on the String-to-compress:
  1168. * [31:8] = {CID[31:20],CID[11:0]}
  1169. * [7:4] = Region
  1170. * [3:0] = Type
  1171. */
  1172. if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
  1173. validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
  1174. if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
  1175. validation_string |= ((region & 0xF) << 4);
  1176. if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
  1177. validation_string |= (conn_type & 0xF);
  1178. /* Convert to big-endian and calculate CRC8 */
  1179. data_to_crc = be32_to_cpu(validation_string);
  1180. crc = crc8(cdu_crc8_table,
  1181. (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
  1182. /* The validation byte [7:0] is composed:
  1183. * for type A validation
  1184. * [7] = active configuration bit
  1185. * [6:0] = crc[6:0]
  1186. *
  1187. * for type B validation
  1188. * [7] = active configuration bit
  1189. * [6:3] = connection_type[3:0]
  1190. * [2:0] = crc[2:0]
  1191. */
  1192. validation_byte |=
  1193. ((validation_cfg >>
  1194. CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
  1195. if ((validation_cfg >>
  1196. CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
  1197. validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
  1198. else
  1199. validation_byte |= crc & 0x7F;
  1200. return validation_byte;
  1201. }
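/* Worked example (illustrative; assumes the CID, region and type inputs
 * are all enabled in the default validation config): for cid = 0x12345678,
 * region = 4, conn_type = 2 the string-to-compress is assembled as
 *
 *   [31:20] = cid[31:20] = 0x123
 *   [19:8]  = cid[11:0]  = 0x678
 *   [7:4]   = region     = 0x4
 *   [3:0]   = type       = 0x2   -> validation_string = 0x12367842
 *
 * which is then endian-converted and fed through the CRC8 table to produce
 * the low bits of the returned validation byte.
 */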
  1202. /* Calculate and set validation bytes for session context */
  1203. void qed_calc_session_ctx_validation(void *p_ctx_mem,
  1204. u16 ctx_size, u8 ctx_type, u32 cid)
  1205. {
  1206. u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
  1207. p_ctx = (u8 * const)p_ctx_mem;
  1208. x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
  1209. t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
  1210. u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
  1211. memset(p_ctx, 0, ctx_size);
  1212. *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
  1213. *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
  1214. *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
  1215. }
  1216. /* Calculate and set validation bytes for task context */
  1217. void qed_calc_task_ctx_validation(void *p_ctx_mem,
  1218. u16 ctx_size, u8 ctx_type, u32 tid)
  1219. {
  1220. u8 *p_ctx, *region1_val_ptr;
  1221. p_ctx = (u8 * const)p_ctx_mem;
  1222. region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
  1223. memset(p_ctx, 0, ctx_size);
  1224. *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
  1225. }
  1226. /* Memset session context to 0 while preserving validation bytes */
  1227. void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
  1228. {
  1229. u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
  1230. u8 x_val, t_val, u_val;
  1231. p_ctx = (u8 * const)p_ctx_mem;
  1232. x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
  1233. t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
  1234. u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
  1235. x_val = *x_val_ptr;
  1236. t_val = *t_val_ptr;
  1237. u_val = *u_val_ptr;
  1238. memset(p_ctx, 0, ctx_size);
  1239. *x_val_ptr = x_val;
  1240. *t_val_ptr = t_val;
  1241. *u_val_ptr = u_val;
  1242. }
  1243. /* Memset task context to 0 while preserving validation bytes */
  1244. void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
  1245. {
  1246. u8 *p_ctx, *region1_val_ptr;
  1247. u8 region1_val;
  1248. p_ctx = (u8 * const)p_ctx_mem;
  1249. region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
  1250. region1_val = *region1_val_ptr;
  1251. memset(p_ctx, 0, ctx_size);
  1252. *region1_val_ptr = region1_val;
  1253. }
  1254. /* Enable and configure context validation */
  1255. void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
  1256. struct qed_ptt *p_ptt)
  1257. {
  1258. u32 ctx_validation;
  1259. /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
  1260. ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
  1261. qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
  1262. /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
  1263. ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
  1264. qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
  1265. /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
  1266. ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
  1267. qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
  1268. }
  1269. static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
  1270. {
  1271. switch (storm_id) {
  1272. case 0:
  1273. return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
  1274. TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
  1275. case 1:
  1276. return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
  1277. MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
  1278. case 2:
  1279. return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
  1280. USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
  1281. case 3:
  1282. return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
  1283. XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
  1284. case 4:
  1285. return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
  1286. YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
  1287. case 5:
  1288. return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
  1289. PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
  1290. default:
  1291. return 0;
  1292. }
  1293. }
  1294. void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
  1295. struct qed_ptt *p_ptt,
  1296. u8 assert_level[NUM_STORMS])
  1297. {
  1298. u8 storm_id;
  1299. for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
  1300. u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);
  1301. qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
  1302. }
  1303. }