qed_dev.c

  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/types.h>
  9. #include <asm/byteorder.h>
  10. #include <linux/io.h>
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/errno.h>
  14. #include <linux/kernel.h>
  15. #include <linux/mutex.h>
  16. #include <linux/pci.h>
  17. #include <linux/slab.h>
  18. #include <linux/string.h>
  19. #include <linux/etherdevice.h>
  20. #include <linux/qed/qed_chain.h>
  21. #include <linux/qed/qed_if.h>
  22. #include "qed.h"
  23. #include "qed_cxt.h"
  24. #include "qed_dev_api.h"
  25. #include "qed_hsi.h"
  26. #include "qed_hw.h"
  27. #include "qed_init_ops.h"
  28. #include "qed_int.h"
  29. #include "qed_mcp.h"
  30. #include "qed_reg_addr.h"
  31. #include "qed_sp.h"
  32. /* API common to all protocols */
  33. enum BAR_ID {
  34. BAR_ID_0, /* used for GRC */
  35. BAR_ID_1 /* Used for doorbells */
  36. };
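/* Return the size in bytes of the given PF BAR as programmed by the MFW.
 * If the BAR-size register was never written (old MFW), fall back to the
 * fixed defaults described in the messages below.
 */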
  37. static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
  38. enum BAR_ID bar_id)
  39. {
  40. u32 bar_reg = (bar_id == BAR_ID_0 ?
  41. PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
  42. u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
  43. if (val)
  44. return 1 << (val + 15);
  45. /* Old MFW initialized the above register only conditionally */
  46. if (p_hwfn->cdev->num_hwfns > 1) {
  47. DP_INFO(p_hwfn,
  48. "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
  49. return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
  50. } else {
  51. DP_INFO(p_hwfn,
  52. "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
  53. return 512 * 1024;
  54. }
  55. }
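/* Set the debug module mask and verbosity level on the device and propagate
 * them to every HW-function it contains.
 */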
  56. void qed_init_dp(struct qed_dev *cdev,
  57. u32 dp_module, u8 dp_level)
  58. {
  59. u32 i;
  60. cdev->dp_level = dp_level;
  61. cdev->dp_module = dp_module;
  62. for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
  63. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  64. p_hwfn->dp_level = dp_level;
  65. p_hwfn->dp_module = dp_module;
  66. }
  67. }
  68. void qed_init_struct(struct qed_dev *cdev)
  69. {
  70. u8 i;
  71. for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
  72. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  73. p_hwfn->cdev = cdev;
  74. p_hwfn->my_id = i;
  75. p_hwfn->b_active = false;
  76. mutex_init(&p_hwfn->dmae_info.mutex);
  77. }
  78. /* hwfn 0 is always active */
  79. cdev->hwfns[0].b_active = true;
  80. /* set the default cache alignment to 128 */
  81. cdev->cache_shift = 7;
  82. }
  83. static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
  84. {
  85. struct qed_qm_info *qm_info = &p_hwfn->qm_info;
  86. kfree(qm_info->qm_pq_params);
  87. qm_info->qm_pq_params = NULL;
  88. kfree(qm_info->qm_vport_params);
  89. qm_info->qm_vport_params = NULL;
  90. kfree(qm_info->qm_port_params);
  91. qm_info->qm_port_params = NULL;
  92. }
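/* Release all software resources allocated by qed_resc_alloc(). */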
  93. void qed_resc_free(struct qed_dev *cdev)
  94. {
  95. int i;
  96. kfree(cdev->fw_data);
  97. cdev->fw_data = NULL;
  98. kfree(cdev->reset_stats);
  99. for_each_hwfn(cdev, i) {
  100. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  101. kfree(p_hwfn->p_tx_cids);
  102. p_hwfn->p_tx_cids = NULL;
  103. kfree(p_hwfn->p_rx_cids);
  104. p_hwfn->p_rx_cids = NULL;
  105. }
  106. for_each_hwfn(cdev, i) {
  107. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  108. qed_cxt_mngr_free(p_hwfn);
  109. qed_qm_info_free(p_hwfn);
  110. qed_spq_free(p_hwfn);
  111. qed_eq_free(p_hwfn, p_hwfn->p_eq);
  112. qed_consq_free(p_hwfn, p_hwfn->p_consq);
  113. qed_int_free(p_hwfn);
  114. qed_dmae_info_free(p_hwfn);
  115. }
  116. }
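/* Build the queue-manager (QM) configuration for this PF: one PQ per
 * traffic class plus a pure loopback PQ, the vport range and per-port
 * parameters. Everything allocated here is released by qed_qm_info_free().
 */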
  117. static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
  118. {
  119. struct qed_qm_info *qm_info = &p_hwfn->qm_info;
  120. struct init_qm_port_params *p_qm_port;
  121. u8 num_vports, i, vport_id, num_ports;
  122. u16 num_pqs, multi_cos_tcs = 1;
  123. memset(qm_info, 0, sizeof(*qm_info));
  124. num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
  125. num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
  126. /* Sanity checking that setup requires legal number of resources */
  127. if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
  128. DP_ERR(p_hwfn,
  129. "Need too many Physical queues - 0x%04x when only %04x are available\n",
  130. num_pqs, RESC_NUM(p_hwfn, QED_PQ));
  131. return -EINVAL;
  132. }
  133. /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
  134. */
  135. qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
  136. num_pqs, GFP_KERNEL);
  137. if (!qm_info->qm_pq_params)
  138. goto alloc_err;
  139. qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
  140. num_vports, GFP_KERNEL);
  141. if (!qm_info->qm_vport_params)
  142. goto alloc_err;
  143. qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
  144. MAX_NUM_PORTS, GFP_KERNEL);
  145. if (!qm_info->qm_port_params)
  146. goto alloc_err;
  147. vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
  148. /* First init per-TC PQs */
  149. for (i = 0; i < multi_cos_tcs; i++) {
  150. struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
  151. params->vport_id = vport_id;
  152. params->tc_id = p_hwfn->hw_info.non_offload_tc;
  153. params->wrr_group = 1;
  154. }
  155. /* Then init pure-LB PQ */
  156. qm_info->pure_lb_pq = i;
  157. qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
  158. qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
  159. qm_info->qm_pq_params[i].wrr_group = 1;
  160. i++;
  161. qm_info->offload_pq = 0;
  162. qm_info->num_pqs = num_pqs;
  163. qm_info->num_vports = num_vports;
  164. /* Initialize qm port parameters */
  165. num_ports = p_hwfn->cdev->num_ports_in_engines;
  166. for (i = 0; i < num_ports; i++) {
  167. p_qm_port = &qm_info->qm_port_params[i];
  168. p_qm_port->active = 1;
  169. p_qm_port->num_active_phys_tcs = 4;
  170. p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
  171. p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
  172. }
  173. qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
  174. qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
  175. qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
  176. qm_info->pf_wfq = 0;
  177. qm_info->pf_rl = 0;
  178. qm_info->vport_rl_en = 1;
  179. return 0;
  180. alloc_err:
  181. DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
  182. kfree(qm_info->qm_pq_params);
  183. kfree(qm_info->qm_vport_params);
  184. kfree(qm_info->qm_port_params);
  185. return -ENOMEM;
  186. }
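/* Allocate the software resources needed ahead of HW init: queue->CID maps,
 * context manager, QM info, ILT/context tables, SPQ, EQ, consolidation queue,
 * interrupt and DMAE structures. Any failure rolls back via qed_resc_free().
 */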
  187. int qed_resc_alloc(struct qed_dev *cdev)
  188. {
  189. struct qed_consq *p_consq;
  190. struct qed_eq *p_eq;
  191. int i, rc = 0;
  192. cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
  193. if (!cdev->fw_data)
  194. return -ENOMEM;
  195. /* Allocate Memory for the Queue->CID mapping */
  196. for_each_hwfn(cdev, i) {
  197. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  198. int tx_size = sizeof(struct qed_hw_cid_data) *
  199. RESC_NUM(p_hwfn, QED_L2_QUEUE);
  200. int rx_size = sizeof(struct qed_hw_cid_data) *
  201. RESC_NUM(p_hwfn, QED_L2_QUEUE);
  202. p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
  203. if (!p_hwfn->p_tx_cids) {
  204. DP_NOTICE(p_hwfn,
  205. "Failed to allocate memory for Tx Cids\n");
  206. rc = -ENOMEM;
  207. goto alloc_err;
  208. }
  209. p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
  210. if (!p_hwfn->p_rx_cids) {
  211. DP_NOTICE(p_hwfn,
  212. "Failed to allocate memory for Rx Cids\n");
  213. rc = -ENOMEM;
  214. goto alloc_err;
  215. }
  216. }
  217. for_each_hwfn(cdev, i) {
  218. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  219. /* First allocate the context manager structure */
  220. rc = qed_cxt_mngr_alloc(p_hwfn);
  221. if (rc)
  222. goto alloc_err;
  223. /* Set the HW cid/tid numbers (in the context manager)
  224. * Must be done prior to any further computations.
  225. */
  226. rc = qed_cxt_set_pf_params(p_hwfn);
  227. if (rc)
  228. goto alloc_err;
  229. /* Prepare and process QM requirements */
  230. rc = qed_init_qm_info(p_hwfn);
  231. if (rc)
  232. goto alloc_err;
  233. /* Compute the ILT client partition */
  234. rc = qed_cxt_cfg_ilt_compute(p_hwfn);
  235. if (rc)
  236. goto alloc_err;
  237. /* CID map / ILT shadow table / T2
  238. * The table sizes are determined by the computations above
  239. */
  240. rc = qed_cxt_tables_alloc(p_hwfn);
  241. if (rc)
  242. goto alloc_err;
  243. /* SPQ, must follow ILT because initializes SPQ context */
  244. rc = qed_spq_alloc(p_hwfn);
  245. if (rc)
  246. goto alloc_err;
  247. /* SP status block allocation */
  248. p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
  249. RESERVED_PTT_DPC);
  250. rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
  251. if (rc)
  252. goto alloc_err;
  253. /* EQ */
  254. p_eq = qed_eq_alloc(p_hwfn, 256);
  255. if (!p_eq) {
  256. rc = -ENOMEM;
  257. goto alloc_err;
  258. }
  259. p_hwfn->p_eq = p_eq;
  260. p_consq = qed_consq_alloc(p_hwfn);
  261. if (!p_consq) {
  262. rc = -ENOMEM;
  263. goto alloc_err;
  264. }
  265. p_hwfn->p_consq = p_consq;
  266. /* DMA info initialization */
  267. rc = qed_dmae_info_alloc(p_hwfn);
  268. if (rc) {
  269. DP_NOTICE(p_hwfn,
  270. "Failed to allocate memory for dmae_info structure\n");
  271. goto alloc_err;
  272. }
  273. }
  274. cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
  275. if (!cdev->reset_stats) {
  276. DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
  277. rc = -ENOMEM;
  278. goto alloc_err;
  279. }
  280. return 0;
  281. alloc_err:
  282. qed_resc_free(cdev);
  283. return rc;
  284. }
  285. void qed_resc_setup(struct qed_dev *cdev)
  286. {
  287. int i;
  288. for_each_hwfn(cdev, i) {
  289. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  290. qed_cxt_mngr_setup(p_hwfn);
  291. qed_spq_setup(p_hwfn);
  292. qed_eq_setup(p_hwfn, p_hwfn->p_eq);
  293. qed_consq_setup(p_hwfn, p_hwfn->p_consq);
  294. /* Read shadow of current MFW mailbox */
  295. qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
  296. memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
  297. p_hwfn->mcp_info->mfw_mb_cur,
  298. p_hwfn->mcp_info->mfw_mb_length);
  299. qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
  300. }
  301. }
  302. #define FINAL_CLEANUP_POLL_CNT (100)
  303. #define FINAL_CLEANUP_POLL_TIME (10)
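/* Request a firmware "final cleanup" of the given PF and poll for its
 * acknowledgement in USDM RAM, for up to
 * FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME milliseconds.
 */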
  304. int qed_final_cleanup(struct qed_hwfn *p_hwfn,
  305. struct qed_ptt *p_ptt,
  306. u16 id)
  307. {
  308. u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
  309. int rc = -EBUSY;
  310. addr = GTT_BAR0_MAP_REG_USDM_RAM +
  311. USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
  312. command |= X_FINAL_CLEANUP_AGG_INT <<
  313. SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
  314. command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
  315. command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
  316. command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
  317. /* Make sure notification is not set before initiating final cleanup */
  318. if (REG_RD(p_hwfn, addr)) {
  319. DP_NOTICE(
  320. p_hwfn,
  321. "Unexpected; Found final cleanup notification before initiating final cleanup\n");
  322. REG_WR(p_hwfn, addr, 0);
  323. }
  324. DP_VERBOSE(p_hwfn, QED_MSG_IOV,
  325. "Sending final cleanup for PFVF[%d] [Command %08x\n]",
  326. id, command);
  327. qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
  328. /* Poll until completion */
  329. while (!REG_RD(p_hwfn, addr) && count--)
  330. msleep(FINAL_CLEANUP_POLL_TIME);
  331. if (REG_RD(p_hwfn, addr))
  332. rc = 0;
  333. else
  334. DP_NOTICE(p_hwfn,
  335. "Failed to receive FW final cleanup notification\n");
  336. /* Cleanup afterwards */
  337. REG_WR(p_hwfn, addr, 0);
  338. return rc;
  339. }
  340. static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
  341. {
  342. int hw_mode = 0;
  343. hw_mode = (1 << MODE_BB_B0);
  344. switch (p_hwfn->cdev->num_ports_in_engines) {
  345. case 1:
  346. hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
  347. break;
  348. case 2:
  349. hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
  350. break;
  351. case 4:
  352. hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
  353. break;
  354. default:
  355. DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
  356. p_hwfn->cdev->num_ports_in_engines);
  357. return;
  358. }
  359. switch (p_hwfn->cdev->mf_mode) {
  360. case QED_MF_DEFAULT:
  361. case QED_MF_NPAR:
  362. hw_mode |= 1 << MODE_MF_SI;
  363. break;
  364. case QED_MF_OVLAN:
  365. hw_mode |= 1 << MODE_MF_SD;
  366. break;
  367. default:
  368. DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
  369. hw_mode |= 1 << MODE_MF_SI;
  370. }
  371. hw_mode |= 1 << MODE_ASIC;
  372. p_hwfn->hw_info.hw_mode = hw_mode;
  373. }
  374. /* Init run time data for all PFs on an engine. */
  375. static void qed_init_cau_rt_data(struct qed_dev *cdev)
  376. {
  377. u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
  378. int i, sb_id;
  379. for_each_hwfn(cdev, i) {
  380. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  381. struct qed_igu_info *p_igu_info;
  382. struct qed_igu_block *p_block;
  383. struct cau_sb_entry sb_entry;
  384. p_igu_info = p_hwfn->hw_info.p_igu_info;
  385. for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
  386. sb_id++) {
  387. p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
  388. if (!p_block->is_pf)
  389. continue;
  390. qed_init_cau_sb_entry(p_hwfn, &sb_entry,
  391. p_block->function_id,
  392. 0, 0);
  393. STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
  394. sb_entry);
  395. }
  396. }
  397. }
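/* Engine-wide (common) init phase: CAU runtime data, GTT windows, common QM
 * runtime parameters and the init-tool ENGINE phase run.
 */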
  398. static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
  399. struct qed_ptt *p_ptt,
  400. int hw_mode)
  401. {
  402. struct qed_qm_info *qm_info = &p_hwfn->qm_info;
  403. struct qed_qm_common_rt_init_params params;
  404. struct qed_dev *cdev = p_hwfn->cdev;
  405. int rc = 0;
  406. qed_init_cau_rt_data(cdev);
  407. /* Program GTT windows */
  408. qed_gtt_init(p_hwfn);
  409. if (p_hwfn->mcp_info) {
  410. if (p_hwfn->mcp_info->func_info.bandwidth_max)
  411. qm_info->pf_rl_en = 1;
  412. if (p_hwfn->mcp_info->func_info.bandwidth_min)
  413. qm_info->pf_wfq_en = 1;
  414. }
  415. memset(&params, 0, sizeof(params));
  416. params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
  417. params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
  418. params.pf_rl_en = qm_info->pf_rl_en;
  419. params.pf_wfq_en = qm_info->pf_wfq_en;
  420. params.vport_rl_en = qm_info->vport_rl_en;
  421. params.vport_wfq_en = qm_info->vport_wfq_en;
  422. params.port_params = qm_info->qm_port_params;
  423. qed_qm_common_rt_init(p_hwfn, &params);
  424. qed_cxt_hw_init_common(p_hwfn);
  425. /* Close gate from NIG to BRB/Storm; By default they are open, but
  426. * we close them to prevent NIG from passing data to reset blocks.
  427. * Should have been done in the ENGINE phase, but init-tool lacks
  428. * proper port-pretend capabilities.
  429. */
  430. qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
  431. qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
  432. qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
  433. qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
  434. qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
  435. qed_port_unpretend(p_hwfn, p_ptt);
  436. rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
  437. if (rc != 0)
  438. return rc;
  439. qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
  440. qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
  441. /* Disable relaxed ordering in the PCI config space */
  442. qed_wr(p_hwfn, p_ptt, 0x20b4,
  443. qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
  444. return rc;
  445. }
  446. static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
  447. struct qed_ptt *p_ptt,
  448. int hw_mode)
  449. {
  450. int rc = 0;
  451. rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
  452. hw_mode);
  453. return rc;
  454. }
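/* Per-PF init phase: context/IGU runtime setup, NIG classification, protocol
 * search configuration, final cleanup of a previous driver instance and,
 * if requested, interrupt enablement plus the function-start ramrod.
 */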
  455. static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
  456. struct qed_ptt *p_ptt,
  457. int hw_mode,
  458. bool b_hw_start,
  459. enum qed_int_mode int_mode,
  460. bool allow_npar_tx_switch)
  461. {
  462. u8 rel_pf_id = p_hwfn->rel_pf_id;
  463. int rc = 0;
  464. if (p_hwfn->mcp_info) {
  465. struct qed_mcp_function_info *p_info;
  466. p_info = &p_hwfn->mcp_info->func_info;
  467. if (p_info->bandwidth_min)
  468. p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
  469. /* Update rate limit once we actually have a link */
  470. p_hwfn->qm_info.pf_rl = 100;
  471. }
  472. qed_cxt_hw_init_pf(p_hwfn);
  473. qed_int_igu_init_rt(p_hwfn);
  474. /* Set VLAN in NIG if needed */
  475. if (hw_mode & (1 << MODE_MF_SD)) {
  476. DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
  477. STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
  478. STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
  479. p_hwfn->hw_info.ovlan);
  480. }
  481. /* Enable classification by MAC if needed */
  482. if (hw_mode & (1 << MODE_MF_SI)) {
  483. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  484. "Configuring TAGMAC_CLS_TYPE\n");
  485. STORE_RT_REG(p_hwfn,
  486. NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
  487. }
  488. /* Protocol Configuration */
  489. STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
  490. STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
  491. STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
  492. /* Clean up any remains left on the chip by a previous driver */
  493. rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
  494. if (rc != 0)
  495. return rc;
  496. /* PF Init sequence */
  497. rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
  498. if (rc)
  499. return rc;
  500. /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
  501. rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
  502. if (rc)
  503. return rc;
  504. /* Pure runtime initializations - directly to the HW */
  505. qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
  506. if (b_hw_start) {
  507. /* enable interrupts */
  508. qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
  509. /* send function start command */
  510. rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
  511. if (rc)
  512. DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
  513. }
  514. return rc;
  515. }
  516. static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
  517. struct qed_ptt *p_ptt,
  518. u8 enable)
  519. {
  520. u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
  521. /* Change PF in PXP */
  522. qed_wr(p_hwfn, p_ptt,
  523. PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
  524. /* wait until value is set - try for 1 second every 50us */
  525. for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
  526. val = qed_rd(p_hwfn, p_ptt,
  527. PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
  528. if (val == set_val)
  529. break;
  530. usleep_range(50, 60);
  531. }
  532. if (val != set_val) {
  533. DP_NOTICE(p_hwfn,
  534. "PFID_ENABLE_MASTER wasn't changed after a second\n");
  535. return -EAGAIN;
  536. }
  537. return 0;
  538. }
  539. static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
  540. struct qed_ptt *p_main_ptt)
  541. {
  542. /* Read shadow of current MFW mailbox */
  543. qed_mcp_read_mb(p_hwfn, p_main_ptt);
  544. memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
  545. p_hwfn->mcp_info->mfw_mb_cur,
  546. p_hwfn->mcp_info->mfw_mb_length);
  547. }
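/* Bring up HW/FW for the whole device. For every HW-function: enable its PF
 * in the PXP, send LOAD_REQ to the MCP, run the init phases implied by the
 * returned load code (engine/port/function) and acknowledge with LOAD_DONE.
 */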
  548. int qed_hw_init(struct qed_dev *cdev,
  549. bool b_hw_start,
  550. enum qed_int_mode int_mode,
  551. bool allow_npar_tx_switch,
  552. const u8 *bin_fw_data)
  553. {
  554. u32 load_code, param;
  555. int rc, mfw_rc, i;
  556. rc = qed_init_fw_data(cdev, bin_fw_data);
  557. if (rc != 0)
  558. return rc;
  559. for_each_hwfn(cdev, i) {
  560. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  561. /* Enable DMAE in PXP */
  562. rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
  563. qed_calc_hw_mode(p_hwfn);
  564. rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
  565. &load_code);
  566. if (rc) {
  567. DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
  568. return rc;
  569. }
  570. qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
  571. DP_VERBOSE(p_hwfn, QED_MSG_SP,
  572. "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
  573. rc, load_code);
  574. p_hwfn->first_on_engine = (load_code ==
  575. FW_MSG_CODE_DRV_LOAD_ENGINE);
  576. switch (load_code) {
  577. case FW_MSG_CODE_DRV_LOAD_ENGINE:
  578. rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
  579. p_hwfn->hw_info.hw_mode);
  580. if (rc)
  581. break;
  582. /* Fall through */
  583. case FW_MSG_CODE_DRV_LOAD_PORT:
  584. rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
  585. p_hwfn->hw_info.hw_mode);
  586. if (rc)
  587. break;
  588. /* Fall through */
  589. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  590. rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
  591. p_hwfn->hw_info.hw_mode,
  592. b_hw_start, int_mode,
  593. allow_npar_tx_switch);
  594. break;
  595. default:
  596. rc = -EINVAL;
  597. break;
  598. }
  599. if (rc)
  600. DP_NOTICE(p_hwfn,
  601. "init phase failed for loadcode 0x%x (rc %d)\n",
  602. load_code, rc);
  603. /* ACK mfw regardless of success or failure of initialization */
  604. mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
  605. DRV_MSG_CODE_LOAD_DONE,
  606. 0, &load_code, &param);
  607. if (rc)
  608. return rc;
  609. if (mfw_rc) {
  610. DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
  611. return mfw_rc;
  612. }
  613. p_hwfn->hw_init_done = true;
  614. }
  615. return 0;
  616. }
  617. #define QED_HW_STOP_RETRY_LIMIT (10)
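/* Close the PF's connection/task timers and wait, bounded by
 * QED_HW_STOP_RETRY_LIMIT polls, for the timer linear scans to complete.
 */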
  618. static inline void qed_hw_timers_stop(struct qed_dev *cdev,
  619. struct qed_hwfn *p_hwfn,
  620. struct qed_ptt *p_ptt)
  621. {
  622. int i;
  623. /* close timers */
  624. qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
  625. qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
  626. for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
  627. if ((!qed_rd(p_hwfn, p_ptt,
  628. TM_REG_PF_SCAN_ACTIVE_CONN)) &&
  629. (!qed_rd(p_hwfn, p_ptt,
  630. TM_REG_PF_SCAN_ACTIVE_TASK)))
  631. break;
  632. /* Dependent on number of connection/tasks, possibly
  633. * 1ms sleep is required between polls
  634. */
  635. usleep_range(1000, 2000);
  636. }
  637. if (i < QED_HW_STOP_RETRY_LIMIT)
  638. return;
  639. DP_NOTICE(p_hwfn,
  640. "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
  641. (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
  642. (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
  643. }
  644. void qed_hw_timers_stop_all(struct qed_dev *cdev)
  645. {
  646. int j;
  647. for_each_hwfn(cdev, j) {
  648. struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
  649. struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
  650. qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
  651. }
  652. }
  653. int qed_hw_stop(struct qed_dev *cdev)
  654. {
  655. int rc = 0, t_rc;
  656. int j;
  657. for_each_hwfn(cdev, j) {
  658. struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
  659. struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
  660. DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
  661. /* mark the hw as uninitialized... */
  662. p_hwfn->hw_init_done = false;
  663. rc = qed_sp_pf_stop(p_hwfn);
  664. if (rc)
  665. DP_NOTICE(p_hwfn,
  666. "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
  667. qed_wr(p_hwfn, p_ptt,
  668. NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
  669. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
  670. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
  671. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
  672. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
  673. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
  674. qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
  675. /* Disable Attention Generation */
  676. qed_int_igu_disable_int(p_hwfn, p_ptt);
  677. qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
  678. qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
  679. qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
  680. /* Need to wait 1ms to guarantee SBs are cleared */
  681. usleep_range(1000, 2000);
  682. }
  683. /* Disable DMAE in PXP - in CMT, this should only be done for
  684. * first hw-function, and only after all transactions have
  685. * stopped for all active hw-functions.
  686. */
  687. t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
  688. cdev->hwfns[0].p_main_ptt,
  689. false);
  690. if (t_rc != 0)
  691. rc = t_rc;
  692. return rc;
  693. }
  694. void qed_hw_stop_fastpath(struct qed_dev *cdev)
  695. {
  696. int j;
  697. for_each_hwfn(cdev, j) {
  698. struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
  699. struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
  700. DP_VERBOSE(p_hwfn,
  701. NETIF_MSG_IFDOWN,
  702. "Shutting down the fastpath\n");
  703. qed_wr(p_hwfn, p_ptt,
  704. NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
  705. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
  706. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
  707. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
  708. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
  709. qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
  710. qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
  711. /* Need to wait 1ms to guarantee SBs are cleared */
  712. usleep_range(1000, 2000);
  713. }
  714. }
  715. void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
  716. {
  717. /* Re-open incoming traffic */
  718. qed_wr(p_hwfn, p_hwfn->p_main_ptt,
  719. NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
  720. }
  721. static int qed_reg_assert(struct qed_hwfn *hwfn,
  722. struct qed_ptt *ptt, u32 reg,
  723. bool expected)
  724. {
  725. u32 assert_val = qed_rd(hwfn, ptt, reg);
  726. if (assert_val != expected) {
  727. DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
  728. reg, expected);
  729. return -EINVAL;
  730. }
  731. return 0;
  732. }
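/* Disable the PF in the relevant HW blocks and perform the MCP
 * UNLOAD_REQ / UNLOAD_DONE handshake for every HW-function.
 */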
  733. int qed_hw_reset(struct qed_dev *cdev)
  734. {
  735. int rc = 0;
  736. u32 unload_resp, unload_param;
  737. int i;
  738. for_each_hwfn(cdev, i) {
  739. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  740. DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
  741. /* Check for incorrect states */
  742. qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
  743. QM_REG_USG_CNT_PF_TX, 0);
  744. qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
  745. QM_REG_USG_CNT_PF_OTHER, 0);
  746. /* Disable PF in HW blocks */
  747. qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
  748. qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
  749. qed_wr(p_hwfn, p_hwfn->p_main_ptt,
  750. TCFC_REG_STRONG_ENABLE_PF, 0);
  751. qed_wr(p_hwfn, p_hwfn->p_main_ptt,
  752. CCFC_REG_STRONG_ENABLE_PF, 0);
  753. /* Send unload command to MCP */
  754. rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
  755. DRV_MSG_CODE_UNLOAD_REQ,
  756. DRV_MB_PARAM_UNLOAD_WOL_MCP,
  757. &unload_resp, &unload_param);
  758. if (rc) {
  759. DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
  760. unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
  761. }
  762. rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
  763. DRV_MSG_CODE_UNLOAD_DONE,
  764. 0, &unload_resp, &unload_param);
  765. if (rc) {
  766. DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
  767. return rc;
  768. }
  769. }
  770. return rc;
  771. }
  772. /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
  773. static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
  774. {
  775. qed_ptt_pool_free(p_hwfn);
  776. kfree(p_hwfn->hw_info.p_igu_info);
  777. }
  778. /* Setup bar access */
  779. static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
  780. {
  781. /* clear indirect access */
  782. qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
  783. qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
  784. qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
  785. qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
  786. /* Clean Previous errors if such exist */
  787. qed_wr(p_hwfn, p_hwfn->p_main_ptt,
  788. PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
  789. 1 << p_hwfn->abs_pf_id);
  790. /* enable internal target-read */
  791. qed_wr(p_hwfn, p_hwfn->p_main_ptt,
  792. PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
  793. }
  794. static void get_function_id(struct qed_hwfn *p_hwfn)
  795. {
  796. /* ME Register */
  797. p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
  798. p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
  799. p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
  800. p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
  801. PXP_CONCRETE_FID_PFID);
  802. p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
  803. PXP_CONCRETE_FID_PORT);
  804. }
  805. static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
  806. {
  807. u32 *feat_num = p_hwfn->hw_info.feat_num;
  808. int num_features = 1;
  809. feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
  810. num_features,
  811. RESC_NUM(p_hwfn, QED_L2_QUEUE));
  812. DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
  813. "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
  814. feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
  815. num_features);
  816. }
  817. static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
  818. {
  819. u32 *resc_start = p_hwfn->hw_info.resc_start;
  820. u32 *resc_num = p_hwfn->hw_info.resc_num;
  821. struct qed_sb_cnt_info sb_cnt_info;
  822. int num_funcs, i;
  823. num_funcs = MAX_NUM_PFS_BB;
  824. memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
  825. qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
  826. resc_num[QED_SB] = min_t(u32,
  827. (MAX_SB_PER_PATH_BB / num_funcs),
  828. sb_cnt_info.sb_cnt);
  829. resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
  830. resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
  831. resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
  832. resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
  833. resc_num[QED_RL] = 8;
  834. resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
  835. resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
  836. num_funcs;
  837. resc_num[QED_ILT] = 950;
  838. for (i = 0; i < QED_MAX_RESC; i++)
  839. resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
  840. qed_hw_set_feat(p_hwfn);
  841. DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
  842. "The numbers for each resource are:\n"
  843. "SB = %d start = %d\n"
  844. "L2_QUEUE = %d start = %d\n"
  845. "VPORT = %d start = %d\n"
  846. "PQ = %d start = %d\n"
  847. "RL = %d start = %d\n"
  848. "MAC = %d start = %d\n"
  849. "VLAN = %d start = %d\n"
  850. "ILT = %d start = %d\n",
  851. p_hwfn->hw_info.resc_num[QED_SB],
  852. p_hwfn->hw_info.resc_start[QED_SB],
  853. p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
  854. p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
  855. p_hwfn->hw_info.resc_num[QED_VPORT],
  856. p_hwfn->hw_info.resc_start[QED_VPORT],
  857. p_hwfn->hw_info.resc_num[QED_PQ],
  858. p_hwfn->hw_info.resc_start[QED_PQ],
  859. p_hwfn->hw_info.resc_num[QED_RL],
  860. p_hwfn->hw_info.resc_start[QED_RL],
  861. p_hwfn->hw_info.resc_num[QED_MAC],
  862. p_hwfn->hw_info.resc_start[QED_MAC],
  863. p_hwfn->hw_info.resc_num[QED_VLAN],
  864. p_hwfn->hw_info.resc_start[QED_VLAN],
  865. p_hwfn->hw_info.resc_num[QED_ILT],
  866. p_hwfn->hw_info.resc_start[QED_ILT]);
  867. }
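/* Parse the nvm_cfg image in MCP scratchpad memory: port mode, default link
 * configuration, multi-function mode and device capabilities.
 */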
  868. static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
  869. struct qed_ptt *p_ptt)
  870. {
  871. u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
  872. u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
  873. struct qed_mcp_link_params *link;
  874. /* Read global nvm_cfg address */
  875. nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
  876. /* Verify MCP has initialized it */
  877. if (!nvm_cfg_addr) {
  878. DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
  879. return -EINVAL;
  880. }
  881. /* Read nvm_cfg1 (Note this is just the offset, not the offsize (TBD)) */
  882. nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
  883. addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
  884. offsetof(struct nvm_cfg1, glob) +
  885. offsetof(struct nvm_cfg1_glob, core_cfg);
  886. core_cfg = qed_rd(p_hwfn, p_ptt, addr);
  887. switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
  888. NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
  889. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
  890. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
  891. break;
  892. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
  893. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
  894. break;
  895. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
  896. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
  897. break;
  898. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
  899. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
  900. break;
  901. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
  902. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
  903. break;
  904. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
  905. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
  906. break;
  907. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
  908. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
  909. break;
  910. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
  911. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
  912. break;
  913. case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
  914. p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
  915. break;
  916. default:
  917. DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
  918. core_cfg);
  919. break;
  920. }
  921. /* Read default link configuration */
  922. link = &p_hwfn->mcp_info->link_input;
  923. port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
  924. offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
  925. link_temp = qed_rd(p_hwfn, p_ptt,
  926. port_cfg_addr +
  927. offsetof(struct nvm_cfg1_port, speed_cap_mask));
  928. link->speed.advertised_speeds =
  929. link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
  930. p_hwfn->mcp_info->link_capabilities.speed_capabilities =
  931. link->speed.advertised_speeds;
  932. link_temp = qed_rd(p_hwfn, p_ptt,
  933. port_cfg_addr +
  934. offsetof(struct nvm_cfg1_port, link_settings));
  935. switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
  936. NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
  937. case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
  938. link->speed.autoneg = true;
  939. break;
  940. case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
  941. link->speed.forced_speed = 1000;
  942. break;
  943. case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
  944. link->speed.forced_speed = 10000;
  945. break;
  946. case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
  947. link->speed.forced_speed = 25000;
  948. break;
  949. case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
  950. link->speed.forced_speed = 40000;
  951. break;
  952. case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
  953. link->speed.forced_speed = 50000;
  954. break;
  955. case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
  956. link->speed.forced_speed = 100000;
  957. break;
  958. default:
  959. DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
  960. link_temp);
  961. }
  962. link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
  963. link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
  964. link->pause.autoneg = !!(link_temp &
  965. NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
  966. link->pause.forced_rx = !!(link_temp &
  967. NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
  968. link->pause.forced_tx = !!(link_temp &
  969. NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
  970. link->loopback_mode = 0;
  971. DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
  972. "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
  973. link->speed.forced_speed, link->speed.advertised_speeds,
  974. link->speed.autoneg, link->pause.autoneg);
  975. /* Read Multi-function information from shmem */
  976. addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
  977. offsetof(struct nvm_cfg1, glob) +
  978. offsetof(struct nvm_cfg1_glob, generic_cont0);
  979. generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
  980. mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
  981. NVM_CFG1_GLOB_MF_MODE_OFFSET;
  982. switch (mf_mode) {
  983. case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
  984. p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
  985. break;
  986. case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
  987. p_hwfn->cdev->mf_mode = QED_MF_NPAR;
  988. break;
  989. case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
  990. p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
  991. break;
  992. }
  993. DP_INFO(p_hwfn, "Multi function mode is %08x\n",
  994. p_hwfn->cdev->mf_mode);
  995. /* Read device capabilities from shmem */
  996. addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
  997. offsetof(struct nvm_cfg1, glob) +
  998. offsetof(struct nvm_cfg1_glob, device_capabilities);
  999. device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
  1000. if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
  1001. __set_bit(QED_DEV_CAP_ETH,
  1002. &p_hwfn->hw_info.device_capabilities);
  1003. return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
  1004. }
  1005. static int
  1006. qed_get_hw_info(struct qed_hwfn *p_hwfn,
  1007. struct qed_ptt *p_ptt,
  1008. enum qed_pci_personality personality)
  1009. {
  1010. u32 port_mode;
  1011. int rc;
  1012. /* Read the port mode */
  1013. port_mode = qed_rd(p_hwfn, p_ptt,
  1014. CNIG_REG_NW_PORT_MODE_BB_B0);
  1015. if (port_mode < 3) {
  1016. p_hwfn->cdev->num_ports_in_engines = 1;
  1017. } else if (port_mode <= 5) {
  1018. p_hwfn->cdev->num_ports_in_engines = 2;
  1019. } else {
  1020. DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
  1021. port_mode);
  1022. /* Default num_ports_in_engines to something */
  1023. p_hwfn->cdev->num_ports_in_engines = 1;
  1024. }
  1025. qed_hw_get_nvm_info(p_hwfn, p_ptt);
  1026. rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
  1027. if (rc)
  1028. return rc;
  1029. if (qed_mcp_is_init(p_hwfn))
  1030. ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
  1031. p_hwfn->mcp_info->func_info.mac);
  1032. else
  1033. eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
  1034. if (qed_mcp_is_init(p_hwfn)) {
  1035. if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
  1036. p_hwfn->hw_info.ovlan =
  1037. p_hwfn->mcp_info->func_info.ovlan;
  1038. qed_mcp_cmd_port_init(p_hwfn, p_ptt);
  1039. }
  1040. if (qed_mcp_is_init(p_hwfn)) {
  1041. enum qed_pci_personality protocol;
  1042. protocol = p_hwfn->mcp_info->func_info.protocol;
  1043. p_hwfn->hw_info.personality = protocol;
  1044. }
  1045. qed_hw_get_resc(p_hwfn);
  1046. return rc;
  1047. }
  1048. static int qed_get_dev_info(struct qed_dev *cdev)
  1049. {
  1050. struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
  1051. u32 tmp;
  1052. /* Read Vendor Id / Device Id */
  1053. pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
  1054. &cdev->vendor_id);
  1055. pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
  1056. &cdev->device_id);
  1057. cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
  1058. MISCS_REG_CHIP_NUM);
  1059. cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
  1060. MISCS_REG_CHIP_REV);
  1061. MASK_FIELD(CHIP_REV, cdev->chip_rev);
  1062. cdev->type = QED_DEV_TYPE_BB;
  1063. /* Learn number of HW-functions */
  1064. tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
  1065. MISCS_REG_CMT_ENABLED_FOR_PAIR);
  1066. if (tmp & (1 << p_hwfn->rel_pf_id)) {
  1067. DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
  1068. cdev->num_hwfns = 2;
  1069. } else {
  1070. cdev->num_hwfns = 1;
  1071. }
  1072. cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
  1073. MISCS_REG_CHIP_TEST_REG) >> 4;
  1074. MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
  1075. cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
  1076. MISCS_REG_CHIP_METAL);
  1077. MASK_FIELD(CHIP_METAL, cdev->chip_metal);
  1078. DP_INFO(cdev->hwfns,
  1079. "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
  1080. cdev->chip_num, cdev->chip_rev,
  1081. cdev->chip_bond_id, cdev->chip_metal);
  1082. if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
  1083. DP_NOTICE(cdev->hwfns,
  1084. "The chip type/rev (BB A0) is not supported!\n");
  1085. return -EINVAL;
  1086. }
  1087. return 0;
  1088. }
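/* Prepare a single HW-function: map its register and doorbell views, verify
 * chip access, allocate the PTT pool, initialize the MCP mailbox and read
 * the HW/NVM configuration.
 */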
  1089. static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
  1090. void __iomem *p_regview,
  1091. void __iomem *p_doorbells,
  1092. enum qed_pci_personality personality)
  1093. {
  1094. int rc = 0;
  1095. /* Split PCI bars evenly between hwfns */
  1096. p_hwfn->regview = p_regview;
  1097. p_hwfn->doorbells = p_doorbells;
  1098. /* Validate that chip access is feasible */
  1099. if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
  1100. DP_ERR(p_hwfn,
  1101. "Reading the ME register returns all Fs; Preventing further chip access\n");
  1102. return -EINVAL;
  1103. }
  1104. get_function_id(p_hwfn);
  1105. /* Allocate PTT pool */
  1106. rc = qed_ptt_pool_alloc(p_hwfn);
  1107. if (rc) {
  1108. DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
  1109. goto err0;
  1110. }
  1111. /* Allocate the main PTT */
  1112. p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
  1113. /* First hwfn learns basic information, e.g., number of hwfns */
  1114. if (!p_hwfn->my_id) {
  1115. rc = qed_get_dev_info(p_hwfn->cdev);
  1116. if (rc != 0)
  1117. goto err1;
  1118. }
  1119. qed_hw_hwfn_prepare(p_hwfn);
  1120. /* Initialize MCP structure */
  1121. rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
  1122. if (rc) {
  1123. DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
  1124. goto err1;
  1125. }
  1126. /* Read the device configuration information from the HW and SHMEM */
  1127. rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
  1128. if (rc) {
  1129. DP_NOTICE(p_hwfn, "Failed to get HW information\n");
  1130. goto err2;
  1131. }
  1132. /* Allocate the init RT array and initialize the init-ops engine */
  1133. rc = qed_init_alloc(p_hwfn);
  1134. if (rc) {
  1135. DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
  1136. goto err2;
  1137. }
  1138. return rc;
  1139. err2:
  1140. qed_mcp_free(p_hwfn);
  1141. err1:
  1142. qed_hw_hwfn_free(p_hwfn);
  1143. err0:
  1144. return rc;
  1145. }
  1146. int qed_hw_prepare(struct qed_dev *cdev,
  1147. int personality)
  1148. {
  1149. struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
  1150. int rc;
  1151. /* Store the precompiled init data ptrs */
  1152. qed_init_iro_array(cdev);
  1153. /* Initialize the first hwfn - will learn number of hwfns */
  1154. rc = qed_hw_prepare_single(p_hwfn,
  1155. cdev->regview,
  1156. cdev->doorbells, personality);
  1157. if (rc)
  1158. return rc;
  1159. personality = p_hwfn->hw_info.personality;
  1160. /* Initialize the rest of the hwfns */
  1161. if (cdev->num_hwfns > 1) {
  1162. void __iomem *p_regview, *p_doorbell;
  1163. u8 __iomem *addr;
  1164. /* adjust bar offset for second engine */
  1165. addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
  1166. p_regview = addr;
  1167. /* adjust doorbell bar offset for second engine */
  1168. addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
  1169. p_doorbell = addr;
  1170. /* prepare second hw function */
  1171. rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
  1172. p_doorbell, personality);
  1173. /* in case of error, need to free the previously
  1174. * initialized hwfn 0.
  1175. */
  1176. if (rc) {
  1177. qed_init_free(p_hwfn);
  1178. qed_mcp_free(p_hwfn);
  1179. qed_hw_hwfn_free(p_hwfn);
  1180. }
  1181. }
  1182. return rc;
  1183. }
  1184. void qed_hw_remove(struct qed_dev *cdev)
  1185. {
  1186. int i;
  1187. for_each_hwfn(cdev, i) {
  1188. struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
  1189. qed_init_free(p_hwfn);
  1190. qed_hw_hwfn_free(p_hwfn);
  1191. qed_mcp_free(p_hwfn);
  1192. }
  1193. }
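/* Allocate DMA-coherent memory for a qed_chain and initialize it, either as
 * a single/next-page chain or as a PBL chain with a separate page-list table.
 *
 * Minimal usage sketch (the use-mode and sizes below are illustrative only,
 * not taken from an actual caller in this file):
 *
 *	struct qed_chain chain;
 *
 *	if (qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
 *			    QED_CHAIN_MODE_PBL, 256, 8, &chain))
 *		return -ENOMEM;
 *	...
 *	qed_chain_free(cdev, &chain);
 */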
  1194. int qed_chain_alloc(struct qed_dev *cdev,
  1195. enum qed_chain_use_mode intended_use,
  1196. enum qed_chain_mode mode,
  1197. u16 num_elems,
  1198. size_t elem_size,
  1199. struct qed_chain *p_chain)
  1200. {
  1201. dma_addr_t p_pbl_phys = 0;
  1202. void *p_pbl_virt = NULL;
  1203. dma_addr_t p_phys = 0;
  1204. void *p_virt = NULL;
  1205. u16 page_cnt = 0;
  1206. size_t size;
  1207. if (mode == QED_CHAIN_MODE_SINGLE)
  1208. page_cnt = 1;
  1209. else
  1210. page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
  1211. size = page_cnt * QED_CHAIN_PAGE_SIZE;
  1212. p_virt = dma_alloc_coherent(&cdev->pdev->dev,
  1213. size, &p_phys, GFP_KERNEL);
  1214. if (!p_virt) {
  1215. DP_NOTICE(cdev, "Failed to allocate chain mem\n");
  1216. goto nomem;
  1217. }
  1218. if (mode == QED_CHAIN_MODE_PBL) {
  1219. size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
  1220. p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
  1221. size, &p_pbl_phys,
  1222. GFP_KERNEL);
  1223. if (!p_pbl_virt) {
  1224. DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
  1225. goto nomem;
  1226. }
  1227. qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
  1228. (u8)elem_size, intended_use,
  1229. p_pbl_phys, p_pbl_virt);
  1230. } else {
  1231. qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
  1232. (u8)elem_size, intended_use, mode);
  1233. }
  1234. return 0;
  1235. nomem:
  1236. dma_free_coherent(&cdev->pdev->dev,
  1237. page_cnt * QED_CHAIN_PAGE_SIZE,
  1238. p_virt, p_phys);
  1239. dma_free_coherent(&cdev->pdev->dev,
  1240. page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
  1241. p_pbl_virt, p_pbl_phys);
  1242. return -ENOMEM;
  1243. }
  1244. void qed_chain_free(struct qed_dev *cdev,
  1245. struct qed_chain *p_chain)
  1246. {
  1247. size_t size;
  1248. if (!p_chain->p_virt_addr)
  1249. return;
  1250. if (p_chain->mode == QED_CHAIN_MODE_PBL) {
  1251. size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
  1252. dma_free_coherent(&cdev->pdev->dev, size,
  1253. p_chain->pbl.p_virt_table,
  1254. p_chain->pbl.p_phys_table);
  1255. }
  1256. size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
  1257. dma_free_coherent(&cdev->pdev->dev, size,
  1258. p_chain->p_virt_addr,
  1259. p_chain->p_phys_addr);
  1260. }
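/* Translate a SW-relative L2-queue index into its absolute FW resource index;
 * the same pattern is used below for vports and RSS engines.
 */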
  1261. int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
  1262. u16 src_id, u16 *dst_id)
  1263. {
  1264. if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
  1265. u16 min, max;
  1266. min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
  1267. max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
  1268. DP_NOTICE(p_hwfn,
  1269. "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
  1270. src_id, min, max);
  1271. return -EINVAL;
  1272. }
  1273. *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
  1274. return 0;
  1275. }
  1276. int qed_fw_vport(struct qed_hwfn *p_hwfn,
  1277. u8 src_id, u8 *dst_id)
  1278. {
  1279. if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
  1280. u8 min, max;
  1281. min = (u8)RESC_START(p_hwfn, QED_VPORT);
  1282. max = min + RESC_NUM(p_hwfn, QED_VPORT);
  1283. DP_NOTICE(p_hwfn,
  1284. "vport id [%d] is not valid, available indices [%d - %d]\n",
  1285. src_id, min, max);
  1286. return -EINVAL;
  1287. }
  1288. *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
  1289. return 0;
  1290. }
  1291. int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
  1292. u8 src_id, u8 *dst_id)
  1293. {
  1294. if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
  1295. u8 min, max;
  1296. min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
  1297. max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
  1298. DP_NOTICE(p_hwfn,
  1299. "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
  1300. src_id, min, max);
  1301. return -EINVAL;
  1302. }
  1303. *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
  1304. return 0;
  1305. }