
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)	  \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr,	  \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

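/* The NVM flash size is encoded as a power-of-two number of Mbits; shifting
 * by MCP_BYTES_PER_MBIT_SHIFT converts Mbits to bytes
 * (2^20 bits / 2^3 bits-per-byte = 2^17 bytes per Mbit).
 */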
#define MCP_BYTES_PER_MBIT_SHIFT	17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

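/* The MFW mailbox in shmem starts with a length word followed by the message
 * dwords themselves; hence the sizeof(u32) offset in the reads below.
 */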
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
	}
	kfree(p_hwfn->mcp_info);

	return 0;
}

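/* Discover the per-PF mailbox addresses: the MFW publishes a shared-memory
 * base through MISC_REG_SHARED_MEM_ADDR, and each public section is then
 * located via an offset/size word relative to that base.
 */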
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->lock);

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox sends of other competing contexts to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a held
	 * spinlock (i.e. interrupts are disabled) can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}

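/* Undo qed_mcp_mb_lock(): for [UN]LOAD_REQ the spinlock was already released
 * there (exclusion is kept via the blocking flag until the matching
 * [UN]LOAD_DONE clears it), so only other commands unlock here.
 */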
static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
}

int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

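/* Send a single command/param pair to the MFW and poll for its reply.
 * Each command is tagged with an incrementing sequence number in the
 * sequence field of drv_mb_header; the MFW echoes that sequence back in
 * fw_mb_header, which is how a response is matched to its request.
 */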
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500*1000*10usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}

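/* Wrapper that also stages a payload through the union_data area of the
 * driver mailbox in shmem: the source union is copied in before the command
 * is issued, and the destination union is copied back out once the MFW has
 * responded.
 */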
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	int rc;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc)
		return rc;

	if (p_mb_params->p_data_src != NULL)
		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
			      p_mb_params->p_data_src,
			      sizeof(*p_mb_params->p_data_src));

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			    p_mb_params->param, &p_mb_params->mcp_resp,
			    &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != NULL)
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr,
				sizeof(*p_mb_params->p_data_dst));

	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data data_src;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	memset(&data_src, 0, sizeof(data_src));
	mb_params.cmd = cmd;
	mb_params.param = param;

	/* In case of UNLOAD_DONE, set the primary MAC */
	if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
	    (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
					     p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, data_src.wol_mac.mac_upper,
			   data_src.wol_mac.mac_lower);

		mb_params.p_data_src = &data_src;
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

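/* Typical caller sketch (illustrative only; variable names are arbitrary):
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			 &resp, &param);
 *	if (rc)
 *		return rc;
 *
 * On success, resp holds the FW_MSG_CODE_* status and param the reply value.
 */
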
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, &union_data.raw_data, *o_txn_size);

	return 0;
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	/* Load Request */
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type;
	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If the MFW refused the load request, we must abort. This can
	 * happen in the following cases:
	 * - The other port is in diagnostic mode.
	 * - A previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	phy_cfg = &union_data.drv_phy_cfg;
	memset(phy_cfg, 0, sizeof(*phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg->speed = params->speed.forced_speed;
	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg->adv_speed = params->speed.advertised_speeds;
	phy_cfg->loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg->speed,
			   phy_cfg->pause,
			   phy_cfg->adv_speed,
			   phy_cfg->loopback_mode,
			   phy_cfg->feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	memcpy(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

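/* Handle an MFW-to-driver notification: re-read the MFW mailbox, diff each
 * message dword against the shadow copy to find what changed, dispatch the
 * matching handler, then acknowledge every message (the ack array follows
 * the message array in shmem) and refresh the shadow.
 */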
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32) *p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32) *p_proto, resp, param);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		/* Fallthrough */
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

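/* Worked example (illustrative): a CFG4 field value of 6 yields
 * 1 << (6 + 17) = 8388608 bytes, i.e. an 8 MB / 64-Mbit flash part.
 */
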
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

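/* The version string is copied into the union four bytes at a time and
 * byte-swapped to big-endian, consistent with the BE convention the MFW
 * mailbox uses elsewhere in this file.
 */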
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);
	ether_addr_copy(&union_data.raw_data[0], mac);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

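/* NVM contents are read through the mailbox in MCP_DRV_NVM_BUF_LEN-sized
 * chunks; each iteration asks the MFW for the next chunk and copies it out
 * of the reply buffer.
 */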
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct bist_nvm_image_att *p_image_att,
					u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

#define QED_RESC_ALLOC_VERSION_MAJOR	1
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct resource_info *p_resc_info,
			  u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	memset(&union_data, 0, sizeof(union_data));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = QED_RESC_ALLOC_VERSION;

	/* Need to have a sufficiently large struct, as the cmd_and_union
	 * is going to do memcpy from and to it.
	 */
	memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	/* Copy the data back */
	memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param,
		   p_resc_info->res_id,
		   p_resc_info->size,
		   p_resc_info->offset,
		   p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return 0;
}