/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)   \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; need to force it to CPU order */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
	}
	kfree(p_hwfn->mcp_info);

	return 0;
}

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
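
/* Note: an illustrative sketch (not part of the driver) of how the "offsize"
 * words read above are consumed. SECTION_OFFSIZE_ADDR() locates a section's
 * offsize word inside the public shmem, and SECTION_ADDR() turns that word
 * plus a PF/port index into an absolute address; the exact masks and shifts
 * live in qed_hsi.h. Assuming a function-indexed section, usage looks like:
 *
 *	u32 offsize = qed_rd(p_hwfn, p_ptt,
 *			     SECTION_OFFSIZE_ADDR(public_base, PUBLIC_FUNC));
 *	u32 func_addr = SECTION_ADDR(offsize, MCP_PF_ID(p_hwfn));
 *
 * Every per-PF and per-port mailbox address in this file is derived this way.
 */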

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->lock);

	return 0;

err:
	DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
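
/* Note: a minimal usage sketch of the init/free pairing above, assuming a
 * probe-time context that already owns a PTT window; illustrative only:
 *
 *	if (qed_mcp_cmd_init(p_hwfn, p_ptt))
 *		return -ENOMEM;
 *	qed_mcp_cmd_port_init(p_hwfn, p_ptt);
 *	...
 *	qed_mcp_free(p_hwfn);	// on teardown; safe even on partial init
 */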

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox commands of other competing contexts to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
			   u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}

static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
			      u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
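
/* Note: qed_mcp_mb_lock() and qed_mcp_mb_unlock() are asymmetric by design;
 * for [UN]LOAD_REQ the spinlock is dropped inside the lock function and only
 * the block_mb_sending flag protects the mailbox, so unlock must not release
 * the spinlock again for those commands. Callers always pair them around a
 * single mailbox transaction, illustratively:
 *
 *	rc = qed_mcp_mb_lock(p_hwfn, cmd);
 *	if (rc)
 *		return rc;
 *	... issue the command ...
 *	qed_mcp_mb_unlock(p_hwfn, cmd);
 */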

int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 sec (500 * 1000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond!\n");
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}
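
/* Note: the sequence-number handshake above gives the MFW up to
 * QED_DRV_MB_MAX_RETRIES * CHIP_MCP_RESP_ITER_US = 500,000 * 10 usec = 5 sec
 * to echo the driver's sequence number in fw_mb_header. A response is matched
 * purely on the sequence bits:
 *
 *	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK))
 *		... reply belongs to this command ...
 *
 * so a stale or foreign reply is treated the same as a timeout (-EAGAIN).
 */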

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	int rc;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc)
		return rc;

	if (p_mb_params->p_data_src != NULL)
		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
			      p_mb_params->p_data_src,
			      sizeof(*p_mb_params->p_data_src));

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			    p_mb_params->param, &p_mb_params->mcp_resp,
			    &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != NULL)
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr,
				sizeof(*p_mb_params->p_data_dst));

	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
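
/* Note: an illustrative call for commands that carry no union data; the
 * values are taken from qed_mcp_drain() later in this file, not invented:
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000,
 *			 &resp, &param);
 *
 * Commands that do carry a payload go through qed_mcp_cmd_and_union()
 * directly, as qed_mcp_load_req() below does.
 */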

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	/* Load Request */
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type;
	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* If the MCP fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If the MFW refused the load request we must abort. This can happen
	 * in the following cases:
	 * - The other port is in diagnostic mode.
	 * - The previously loaded function on the engine is not compliant
	 *   with the requester.
	 * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
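
/* Note: the VF bitmaps above are sized in dwords, i.e. VF_MAX_STATIC / 32
 * u32 entries, which is VF_MAX_STATIC / 8 bytes; that is why the memcpy()
 * into ack_vf_disabled uses VF_MAX_STATIC / 8. As a worked example, a
 * (hypothetical) VF_MAX_STATIC of 192 yields 6 dwords / 24 bytes.
 */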

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port,
				  transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port,
					  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch (status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	phy_cfg = &union_data.drv_phy_cfg;
	memset(phy_cfg, 0, sizeof(*phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg->speed = params->speed.forced_speed;
	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg->adv_speed = params->speed.advertised_speeds;
	phy_cfg->loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg->speed,
			   phy_cfg->pause,
			   phy_cfg->adv_speed,
			   phy_cfg->loopback_mode,
			   phy_cfg->feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* If the MCP fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}
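
/* Note: a minimal bring-up sketch, assuming the caller has already filled
 * link_input (e.g. via the ethtool configuration path); illustrative only:
 *
 *	struct qed_mcp_link_params *params;
 *
 *	params = qed_mcp_get_link_params(p_hwfn);
 *	params->speed.autoneg = true;
 *	rc = qed_mcp_set_link(p_hwfn, p_ptt, true);	// b_up = true
 *
 * Passing b_up = false sends DRV_MSG_CODE_LINK_RESET and locally clears the
 * link indications via qed_mcp_handle_link_change(..., true).
 */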

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data,
				  int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data),
		     QED_SECTION_SIZE(mfw_func_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));

	return size;
}

int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *p_pf)
{
	struct public_func shmem_info;
	int i;

	/* Find the first Ethernet interface in the port */
	for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
	     i += p_hwfn->cdev->num_ports_in_engines) {
		qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				       MCP_PF_ID_BY_REL(p_hwfn, i));

		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
		    FUNC_MF_CFG_PROTOCOL_ETHERNET) {
			*p_pf = (u8)i;
			return 0;
		}
	}

	DP_NOTICE(p_hwfn,
		  "Failed to find an Ethernet interface on the port in MF_SI mode\n");

	return -EINVAL;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
			       MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* The MFW expects the answer in BE, so force the write in
		 * that format.
		 */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
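
/* Note: the mailbox layout assumed by the ACK loop above - the MFW area
 * starts with a length dword, followed by the "current" message dwords read
 * by qed_mcp_read_mb(), followed by the driver's ACK dwords of the same
 * size. Hence the ACK address of dword i is:
 *
 *	mfw_mb_addr + sizeof(u32)                            (skip length)
 *		    + MFW_DRV_MSG_MAX_DWORDS(len) * sizeof(u32)  (skip msgs)
 *		    + i * sizeof(u32)
 */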

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		}

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF requested MFW version prior to ACQUIRE\n");
		return -EINVAL;
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev,
			   u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (test_bit(QED_DEV_CAP_ROCE,
			     &p_hwfn->hw_info.device_capabilities))
			*p_proto = QED_PCI_ETH_ROCE;
		else
			*p_proto = QED_PCI_ETH;
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		rc = -EINVAL;
		break;
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
			       MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}
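
/* Note: a worked example of the MAC reassembly above. The shmem stores the
 * address split across mac_upper (top 2 bytes) and mac_lower (bottom 4
 * bytes). For the hypothetical MAC 00:0e:1e:aa:bb:cc:
 *
 *	mac_upper = 0x000e;	->  mac[0] = 0x00, mac[1] = 0x0e
 *	mac_lower = 0x1eaabbcc;	->  mac[2..5] = 1e:aa:bb:cc
 */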

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000,
			 &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
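
/* Note: the flash size field is an exponent of megabits, hence
 * MCP_BYTES_PER_MBIT_SHIFT (2^17 bytes = 1 Mbit). A field value f decodes to
 * (1 << (f + 17)) bytes = 2^f Mbit; e.g. f = 6 gives 64 Mbit = 8 MB.
 */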

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only the leader can configure MSIX, and needs to take CMT into
	 * account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	/* Copy the version name four bytes at a time; the MFW expects the
	 * string as big-endian dwords.
	 */
	for (i = 0; i < MCP_DRV_VER_STR_SIZE - 4; i += 4) {
		val = cpu_to_be32(*(u32 *)&p_ver->name[i]);
		*(__be32 *)&p_drv_version->name[i] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}
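
/* Note: illustrative only - an identify-style blink sequence that hands LED
 * control back to the firmware afterwards might look like:
 *
 *	qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_ON);
 *	msleep(500);
 *	qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_RESTORE);
 */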

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}