qed_mcp.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

#define CHIP_MCP_RESP_ITER_US   10

#define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES   (50 * 1000)     /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)            \
        qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset),  \
               _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
        qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)           \
        DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr,         \
                     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)          \
        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr,  \
                     offsetof(struct public_drv_mb, _field))
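
/* Illustrative expansion (a reader's sketch, not part of the driver): a call
 * such as DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) becomes
 *
 *      qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->drv_mb_addr +
 *             offsetof(struct public_drv_mb, drv_mb_header));
 *
 * i.e. every mailbox field access is a GRC read/write at the PF's
 * driver-mailbox base plus the field's offset within struct public_drv_mb.
 */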

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
                  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
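
/* 1 Mbit (2^20 bits) is 2^17 bytes, so 1 << (n + MCP_BYTES_PER_MBIT_SHIFT)
 * converts an NVM size encoded as 2^n Mbit into bytes; see
 * qed_mcp_get_flash_size() below.
 */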

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
                return false;
        return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PORT);
        u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

        p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
                                                   MFW_PORT(p_hwfn));
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "port_addr = 0x%x, port_id 0x%02x\n",
                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt)
{
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        u32 tmp, i;

        if (!p_hwfn->mcp_info->public_base)
                return;

        for (i = 0; i < length; i++) {
                tmp = qed_rd(p_hwfn, p_ptt,
                             p_hwfn->mcp_info->mfw_mb_addr +
                             (i << 2) + sizeof(u32));

                /* The MB data is actually BE; Need to force it to cpu */
                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
                        be32_to_cpu((__force __be32)tmp);
        }
}
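
/* Layout note for qed_mcp_read_mb() above: the first dword at mfw_mb_addr
 * holds the mailbox length (read once in qed_load_mcp_offsets()), which is
 * why message dword i is fetched from mfw_mb_addr + sizeof(u32) + (i << 2).
 * Data arrives big-endian and is byte-swapped into the mfw_mb_cur cache.
 */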

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                kfree(p_hwfn->mcp_info->mfw_mb_cur);
                kfree(p_hwfn->mcp_info->mfw_mb_shadow);
        }
        kfree(p_hwfn->mcp_info);

        return 0;
}

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

        p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return 0;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                     PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                     PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                             DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;

        p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *p_info;
        u32 size;

        /* Allocate mcp_info structure */
        p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
        if (!p_hwfn->mcp_info)
                goto err;
        p_info = p_hwfn->mcp_info;

        if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
                DP_NOTICE(p_hwfn, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicates
                 * that the MCP is not initialized
                 */
                return 0;
        }

        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;

        /* Initialize the MFW spinlock */
        spin_lock_init(&p_info->lock);

        return 0;

err:
        DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
        qed_mcp_free(p_hwfn);
        return -ENOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox sends of other competing contexts to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
                           u32 cmd)
{
        spin_lock_bh(&p_hwfn->mcp_info->lock);

        /* The spinlock shouldn't be acquired when the mailbox command is
         * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
         * pending [UN]LOAD_REQ command of another PF together with a spinlock
         * (i.e. interrupts are disabled) can lead to a deadlock.
         * It is assumed that for a single PF, no other mailbox commands can be
         * sent from another context while sending LOAD_REQ, and that any
         * parallel commands to UNLOAD_REQ can be cancelled.
         */
        if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
                p_hwfn->mcp_info->block_mb_sending = false;

        if (p_hwfn->mcp_info->block_mb_sending) {
                DP_NOTICE(p_hwfn,
                          "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
                          cmd);
                spin_unlock_bh(&p_hwfn->mcp_info->lock);
                return -EBUSY;
        }

        if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                p_hwfn->mcp_info->block_mb_sending = true;
                spin_unlock_bh(&p_hwfn->mcp_info->lock);
        }

        return 0;
}

static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
                              u32 cmd)
{
        if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
                spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
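
/* Typical lock/unlock pairing (a minimal sketch; error handling elided):
 *
 *      rc = qed_mcp_mb_lock(p_hwfn, cmd);
 *      if (rc)
 *              return rc;
 *      ... program drv_mb_param / drv_mb_header and poll for a reply ...
 *      qed_mcp_mb_unlock(p_hwfn, cmd);
 *
 * For [UN]LOAD_REQ the lock call releases the spinlock immediately and
 * relies on block_mb_sending instead, so the matching unlock is a no-op.
 */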

int qed_mcp_reset(struct qed_hwfn *p_hwfn,
                  struct qed_ptt *p_ptt)
{
        u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
        u8 delay = CHIP_MCP_RESP_ITER_US;
        u32 org_mcp_reset_seq, cnt = 0;
        int rc = 0;

        /* Ensure that only a single thread is accessing the mailbox at a
         * certain time.
         */
        rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
        if (rc != 0)
                return rc;

        /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
                  (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                udelay(delay);
                /* Give the FW up to 500 msec (50*1000*10usec) */
        } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
                                              MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < QED_MCP_RESET_RETRIES));

        if (org_mcp_reset_seq !=
            qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = -EAGAIN;
        }

        qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

        return rc;
}

static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          u32 cmd,
                          u32 param,
                          u32 *o_mcp_resp,
                          u32 *o_mcp_param)
{
        u8 delay = CHIP_MCP_RESP_ITER_US;
        u32 seq, cnt = 1, actual_mb_seq;
        int rc = 0;

        /* Get actual driver mailbox sequence */
        actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                        DRV_MSG_SEQ_NUMBER_MASK;

        /* Use MCP history register to check if MCP reset occurred between
         * init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist !=
            qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
                qed_load_mcp_offsets(p_hwfn, p_ptt);
                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
        seq = ++p_hwfn->mcp_info->drv_mb_seq;

        /* Set drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

        /* Set drv command along with the updated sequence */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "wrote command (%x) to MFW MB param 0x%08x\n",
                   (cmd | seq), param);

        do {
                /* Wait for MFW response */
                udelay(delay);
                *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

                /* Give the FW up to 5 sec (500*1000*10usec) */
        } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
                 (cnt++ < QED_DRV_MB_MAX_RETRIES));

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
                   cnt * delay, *o_mcp_resp, seq);

        /* Is this a reply to our command? */
        if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
                *o_mcp_resp &= FW_MSG_CODE_MASK;
                /* Get the MCP param */
                *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
        } else {
                /* FW BUG! */
                DP_ERR(p_hwfn, "MFW failed to respond!\n");
                *o_mcp_resp = 0;
                rc = -EAGAIN;
        }
        return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_mcp_mb_params *p_mb_params)
{
        u32 union_data_addr;
        int rc;

        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }

        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          offsetof(struct public_drv_mb, union_data);

        /* Ensure that only a single thread is accessing the mailbox at a
         * certain time.
         */
        rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
        if (rc)
                return rc;

        if (p_mb_params->p_data_src != NULL)
                qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
                              p_mb_params->p_data_src,
                              sizeof(*p_mb_params->p_data_src));

        rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
                            p_mb_params->param, &p_mb_params->mcp_resp,
                            &p_mb_params->mcp_param);

        if (p_mb_params->p_data_dst != NULL)
                qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                union_data_addr,
                                sizeof(*p_mb_params->p_data_dst));

        qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

        return rc;
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                u32 cmd,
                u32 param,
                u32 *o_mcp_resp,
                u32 *o_mcp_param)
{
        struct qed_mcp_mb_params mb_params;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return 0;
}
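
/* Example caller of qed_mcp_cmd() (an illustrative sketch; the command and
 * param values match the real use in qed_mcp_drain() below):
 *
 *      u32 resp = 0, param = 0;
 *      int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN,
 *                           1000, &resp, &param);
 *
 * On success, resp holds the FW_MSG_CODE_* reply and param holds the
 * command-specific return value.
 */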

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u32 *p_load_code)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        /* Load Request */
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
        mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
                          cdev->drv_type;
        memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        *p_load_code = mb_params.mcp_resp;

        /* If MFW refused the load request we must abort. This can happen in
         * the following cases:
         * - The other port is in diagnostic mode.
         * - A previously loaded function on the engine is not compliant with
         *   the requester.
         * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
         */
        if (!(*p_load_code) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
                DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
                return -EBUSY;
        }

        return 0;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
                                              struct qed_ptt *p_ptt)
{
        u32 transceiver_state;

        transceiver_state = qed_rd(p_hwfn, p_ptt,
                                   p_hwfn->mcp_info->port_addr +
                                   offsetof(struct public_port,
                                            transceiver_data));

        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_HW | QED_MSG_SP),
                   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
                   transceiver_state,
                   (u32)(p_hwfn->mcp_info->port_addr +
                         offsetof(struct public_port,
                                  transceiver_data)));

        transceiver_state = GET_FIELD(transceiver_state,
                                      PMM_TRANSCEIVER_STATE);

        if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
                DP_NOTICE(p_hwfn, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       bool b_reset)
{
        struct qed_mcp_link_state *p_link;
        u32 status = 0;

        p_link = &p_hwfn->mcp_info->link_output;
        memset(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = qed_rd(p_hwfn, p_ptt,
                                p_hwfn->mcp_info->port_addr +
                                offsetof(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
                           "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
                           status,
                           (u32)(p_hwfn->mcp_info->port_addr +
                                 offsetof(struct public_port,
                                          link_status)));
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link indications\n");
                return;
        }

        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        /* Correct speed according to bandwidth allocation */
        if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
                p_link->speed = p_link->speed *
                                p_hwfn->mcp_info->func_info.bandwidth_max /
                                100;
                qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
                               p_link->speed);
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Configured MAX bandwidth to be %08x Mb/sec\n",
                           p_link->speed);
        }

        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status &
                                 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                        LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
                !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
                !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        qed_link_update(p_hwfn);
}
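
/* Worked example for the bandwidth correction in qed_mcp_handle_link_change()
 * above: on a 40G link with bandwidth_max = 50, the reported speed becomes
 * 40000 * 50 / 100 = 20000 Mb/s, and the PF rate limiter is programmed to
 * that value via qed_init_pf_rl().
 */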

int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     bool b_up)
{
        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        struct pmm_phy_cfg *phy_cfg;
        int rc = 0;
        u32 cmd;

        /* Set the shmem configuration according to params */
        phy_cfg = &union_data.drv_phy_cfg;
        memset(phy_cfg, 0, sizeof(*phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
                phy_cfg->speed = params->speed.forced_speed;
        phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
        phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
        phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
        phy_cfg->adv_speed = params->speed.advertised_speeds;
        phy_cfg->loopback_mode = params->loopback_mode;

        p_hwfn->b_drv_link_init = b_up;

        if (b_up) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
                           phy_cfg->speed,
                           phy_cfg->pause,
                           phy_cfg->adv_speed,
                           phy_cfg->loopback_mode,
                           phy_cfg->feature_config_flags);
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link\n");
        }

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Reset the link status if needed */
        if (!b_up)
                qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

        return 0;
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *info = p_hwfn->mcp_info;
        int rc = 0;
        bool found = false;
        u16 i;

        DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

        /* Read Messages from MFW */
        qed_mcp_read_mb(p_hwfn, p_ptt);

        /* Compare current messages to old ones */
        for (i = 0; i < info->mfw_mb_length; i++) {
                if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
                        continue;

                found = true;

                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
                           i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

                switch (i) {
                case MFW_DRV_MSG_LINK_CHANGE:
                        qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
                        break;
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
                default:
                        DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = -EINVAL;
                }
        }

        /* ACK everything */
        for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
                __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

                /* MFW expects answers in BE, so force the write to that
                 * format.
                 */
                qed_wr(p_hwfn, p_ptt,
                       info->mfw_mb_addr + sizeof(u32) +
                       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
                       sizeof(u32) + i * sizeof(u32),
                       (__force u32)val);
        }

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Received an MFW message indication but no new message!\n");
                rc = -EINVAL;
        }

        /* Copy the new mfw messages into the shadow */
        memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

        return rc;
}
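
/* Note on the ACK loop in qed_mcp_handle_events() above: the acknowledgment
 * area in shmem sits after the length dword and the current-message dwords,
 * i.e. ACK dword i is written at
 * mfw_mb_addr + sizeof(u32) + length * sizeof(u32) + i * sizeof(u32),
 * big-endian, mirroring the byte order used by qed_mcp_read_mb().
 */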

int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
                        u32 *p_mfw_ver)
{
        struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
        struct qed_ptt *p_ptt;
        u32 global_offsize;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EBUSY;

        global_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
                                                     public_base,
                                                     PUBLIC_GLOBAL));
        *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
                            SECTION_ADDR(global_offsize, 0) +
                            offsetof(struct public_global, mfw_ver));

        qed_ptt_release(p_hwfn, p_ptt);

        return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev,
                           u32 *p_media_type)
{
        struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
        struct qed_ptt *p_ptt;

        if (!qed_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }

        *p_media_type = MEDIA_UNSPECIFIED;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EBUSY;

        *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
                               offsetof(struct public_port, media_type));

        qed_ptt_release(p_hwfn, p_ptt);

        return 0;
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct public_func *p_data,
                                  int pfid)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
        u32 i, size;

        memset(p_data, 0, sizeof(*p_data));

        size = min_t(u32, sizeof(*p_data),
                     QED_SECTION_SIZE(mfw_path_offsize));
        for (i = 0; i < size / sizeof(u32); i++)
                ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
                                            func_addr + (i << 2));

        return size;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
                        struct public_func *p_info,
                        enum qed_pci_personality *p_proto)
{
        int rc = 0;

        switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
        case FUNC_MF_CFG_PROTOCOL_ETHERNET:
                *p_proto = QED_PCI_ETH;
                break;
        default:
                rc = -EINVAL;
        }

        return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        struct qed_mcp_function_info *info;
        struct public_func shmem_info;

        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
                               MCP_PF_ID(p_hwfn));
        info = &p_hwfn->mcp_info->func_info;

        info->pause_on_host = (shmem_info.config &
                               FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

        if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
                                    &info->protocol)) {
                DP_ERR(p_hwfn, "Unknown personality %08x\n",
                       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
                return -EINVAL;
        }

        info->bandwidth_min = (shmem_info.config &
                               FUNC_MF_CFG_MIN_BW_MASK) >>
                              FUNC_MF_CFG_MIN_BW_SHIFT;
        if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
                        info->bandwidth_min);
                info->bandwidth_min = 1;
        }

        info->bandwidth_max = (shmem_info.config &
                               FUNC_MF_CFG_MAX_BW_MASK) >>
                              FUNC_MF_CFG_MAX_BW_SHIFT;
        if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
                        info->bandwidth_max);
                info->bandwidth_max = 100;
        }

        if (shmem_info.mac_upper || shmem_info.mac_lower) {
                info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
                info->mac[1] = (u8)(shmem_info.mac_upper);
                info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
                info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
                info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
                info->mac[5] = (u8)(shmem_info.mac_lower);
        } else {
                DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
        }

        info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
                         (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
        info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
                         (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

        info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

        DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
                   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
                   info->pause_on_host, info->protocol,
                   info->bandwidth_min, info->bandwidth_max,
                   info->mac[0], info->mac[1], info->mac[2],
                   info->mac[3], info->mac[4], info->mac[5],
                   info->wwn_port, info->wwn_node, info->ovlan);

        return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return NULL;
        return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return NULL;
        return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return NULL;
        return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn,
                  struct qed_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        int rc;

        rc = qed_mcp_cmd(p_hwfn, p_ptt,
                         DRV_MSG_CODE_NIG_DRAIN, 1000,
                         &resp, &param);

        /* Wait for the drain to complete before returning */
        msleep(1020);

        return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           u32 *p_flash_size)
{
        u32 flash_size;

        flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
        flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
                     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
        flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

        *p_flash_size = flash_size;

        return 0;
}
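
/* Worked example for qed_mcp_get_flash_size() above (following the CFG4
 * encoding the code reads): a field value of 3 yields
 * 1 << (3 + 17) = 1048576 bytes, i.e. 1 MB (8 Mbit) of flash.
 */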

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         struct qed_mcp_drv_version *p_ver)
{
        struct drv_version_stc *p_drv_version;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        __be32 val;
        u32 i;
        int rc;

        p_drv_version = &union_data.drv_version;
        p_drv_version->version = p_ver->version;
        /* Copy the version string dword by dword, byte-swapping each dword
         * to the BE format the MFW expects.
         */
        for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
                val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
                *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
        }

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");

        return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                    enum qed_led_mode mode)
{
        u32 resp = 0, param = 0, drv_mb_param;
        int rc;

        switch (mode) {
        case QED_LED_MODE_ON:
                drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
                break;
        case QED_LED_MODE_OFF:
                drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
                break;
        case QED_LED_MODE_RESTORE:
                drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
                break;
        default:
                DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
                return -EINVAL;
        }

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
                         drv_mb_param, &resp, &param);

        return rc;
}
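
/* Usage sketch for qed_mcp_set_led() (illustrative only; the blink interval
 * and caller context are assumptions, not taken from this file):
 *
 *      qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_ON);
 *      msleep(interval);
 *      qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_RESTORE);
 *
 * QED_LED_MODE_RESTORE hands LED control back to the management FW
 * (DRV_MB_PARAM_SET_LED_MODE_OPER).
 */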