qed_mcp.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/types.h>
  9. #include <asm/byteorder.h>
  10. #include <linux/delay.h>
  11. #include <linux/errno.h>
  12. #include <linux/kernel.h>
  13. #include <linux/mutex.h>
  14. #include <linux/slab.h>
  15. #include <linux/string.h>
  16. #include "qed.h"
  17. #include "qed_hsi.h"
  18. #include "qed_hw.h"
  19. #include "qed_mcp.h"
  20. #include "qed_reg_addr.h"
  21. #define CHIP_MCP_RESP_ITER_US 10
  22. #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
  23. #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
  24. #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
  25. qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
  26. _val)
  27. #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
  28. qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
  29. #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
  30. DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
  31. offsetof(struct public_drv_mb, _field), _val)
  32. #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
  33. DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
  34. offsetof(struct public_drv_mb, _field))
  35. #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
  36. DRV_ID_PDA_COMP_VER_SHIFT)
  37. #define MCP_BYTES_PER_MBIT_SHIFT 17
  38. bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
  39. {
  40. if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
  41. return false;
  42. return true;
  43. }
/* Derive and cache the per-port shmem address for this HW function.
 *
 * Reads the offset/size word of the PUBLIC_PORT section from the MFW
 * public shmem and stores the resulting per-port section address in
 * mcp_info->port_addr.
 */
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt)
{
	/* Locate the PUBLIC_PORT section inside the MFW public shmem */
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
/* Snapshot the MFW->driver mailbox from shmem into mfw_mb_cur,
 * converting each dword from big-endian to CPU order.
 *
 * Does nothing when the MCP was never initialized (public_base == 0).
 */
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		/* The sizeof(u32) skip steps over the leading length word
		 * of the mailbox (see qed_load_mcp_offsets()).
		 */
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
  72. int qed_mcp_free(struct qed_hwfn *p_hwfn)
  73. {
  74. if (p_hwfn->mcp_info) {
  75. kfree(p_hwfn->mcp_info->mfw_mb_cur);
  76. kfree(p_hwfn->mcp_info->mfw_mb_shadow);
  77. }
  78. kfree(p_hwfn->mcp_info);
  79. return 0;
  80. }
/* Read the shmem section layout published by the MFW and cache the
 * addresses/sequence numbers the driver needs for mailbox traffic.
 *
 * Returns 0 even when the MCP is not initialized (public_base == 0);
 * callers detect that condition via qed_mcp_is_init().
 */
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	/* First dword of the MFW mailbox holds its length */
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	/* Remember the MCP reset-history counter; a later change means the
	 * MCP was reset and these offsets must be re-read (qed_do_mcp_cmd).
	 */
	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
  116. int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
  117. struct qed_ptt *p_ptt)
  118. {
  119. struct qed_mcp_info *p_info;
  120. u32 size;
  121. /* Allocate mcp_info structure */
  122. p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
  123. if (!p_hwfn->mcp_info)
  124. goto err;
  125. p_info = p_hwfn->mcp_info;
  126. if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
  127. DP_NOTICE(p_hwfn, "MCP is not initialized\n");
  128. /* Do not free mcp_info here, since public_base indicate that
  129. * the MCP is not initialized
  130. */
  131. return 0;
  132. }
  133. size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
  134. p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
  135. p_info->mfw_mb_shadow =
  136. kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
  137. p_info->mfw_mb_length), GFP_ATOMIC);
  138. if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
  139. goto err;
  140. /* Initialize the MFW mutex */
  141. mutex_init(&p_info->mutex);
  142. return 0;
  143. err:
  144. DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
  145. qed_mcp_free(p_hwfn);
  146. return -ENOMEM;
  147. }
/* Request the MFW to reset the MCP, then poll the reset-history
 * register until it changes or the retry budget expires.
 *
 * Return: 0 on success, -EAGAIN if the MCP did not reset in time.
 */
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 iterations of 10us) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	/* A changed history counter means the MCP went through a reset */
	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	return rc;
}
/* Perform one driver->MFW mailbox transaction: write the parameter and
 * the command (tagged with a fresh sequence number), then poll the FW
 * mailbox header until the FW echoes that sequence back.
 *
 * Caller must hold mcp_info->mutex (see qed_mcp_cmd()).
 *
 * Return: 0 with *o_mcp_resp / *o_mcp_param filled on success,
 * -EAGAIN if the MFW never responded.
 */
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	/* NOTE(review): actual_mb_seq is read but never used below -
	 * confirm whether it can be removed.
	 */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500 * 1000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	/* NOTE(review): cnt * delay is in usec although the log text says
	 * 'ms' - confirm before relying on the printed value.
	 */
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond!\n");
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}
  229. int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
  230. struct qed_ptt *p_ptt,
  231. u32 cmd,
  232. u32 param,
  233. u32 *o_mcp_resp,
  234. u32 *o_mcp_param)
  235. {
  236. int rc = 0;
  237. /* MCP not initialized */
  238. if (!qed_mcp_is_init(p_hwfn)) {
  239. DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
  240. return -EBUSY;
  241. }
  242. /* Lock Mutex to ensure only single thread is
  243. * accessing the MCP at one time
  244. */
  245. mutex_lock(&p_hwfn->mcp_info->mutex);
  246. rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
  247. o_mcp_resp, o_mcp_param);
  248. /* Release Mutex */
  249. mutex_unlock(&p_hwfn->mcp_info->mutex);
  250. return rc;
  251. }
/* Copy the driver version string from cdev into the union_data area of
 * the driver mailbox, one dword at a time.
 */
static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
				struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	u32 i;

	/* Copy version string to MCP */
	for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
		DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
			  *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
}
/* Issue a LOAD_REQ mailbox command to the MFW and return its response
 * code in *p_load_code.
 *
 * The driver version string is written to shmem first; the request
 * parameter encodes the PDA compatibility version, the MCP HSI version
 * and the driver type.
 *
 * Return: 0 on success, -EBUSY if the MCP is uninitialized or refused
 * the load, or the qed_mcp_cmd() error on mailbox failure.
 */
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 param;
	int rc;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Save driver's version to shmem */
	qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	/* Load Request */
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
			 (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type),
			 p_load_code, &param);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* If MFW refused we must abort. This can happen in the following
	 * cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
/* Parse a link-status notification from the MFW, update the cached
 * link state (mcp_info->link_output) and notify the rest of the driver
 * via qed_link_update().
 *
 * @b_reset: when true, only clear the cached link indications without
 *           reading shmem (used on link reset).
 */
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port,
					  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	p_link->link_up = !!(status & LINK_STATUS_LINK_UP);

	/* Decode negotiated speed/duplex; speed is in Mb/sec */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* Correct speed according to bandwidth allocation */
	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
		p_link->speed = p_link->speed *
				p_hwfn->mcp_info->func_info.bandwidth_max /
				100;
		qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       p_link->speed);
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configured MAX bandwidth to be %08x Mb/sec\n",
			   p_link->speed);
	}

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Gather the link partner's advertised speeds */
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	/* Decode the partner's pause capability */
	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* Propagate the new link state to the rest of the driver */
	qed_link_update(p_hwfn);
}
/* Configure (or reset) the PHY/link through the MFW.
 *
 * Builds a pmm_phy_cfg from mcp_info->link_input, writes it into the
 * mailbox union_data area, then issues INIT_PHY (b_up) or LINK_RESET
 * (!b_up).  On a reset the cached link state is also cleared locally.
 *
 * Return: 0 on success, -EBUSY if the MCP is uninitialized, or the
 * qed_mcp_cmd() error on mailbox failure.
 */
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	u32 param = 0, reply = 0, cmd;
	struct pmm_phy_cfg phy_cfg;
	int rc = 0;
	u32 i;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	/* A forced speed is only meaningful when autoneg is off */
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* Write the requested configuration to shmem */
	for (i = 0; i < sizeof(phy_cfg); i += 4)
		qed_wr(p_hwfn, p_ptt,
		       p_hwfn->mcp_info->drv_mb_addr +
		       offsetof(struct public_drv_mb, union_data) + i,
		       ((u32 *)&phy_cfg)[i >> 2]);

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	/* Issue the PHY command (INIT_PHY or LINK_RESET) */
	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}
/* Process pending MFW->driver notification messages.
 *
 * Reads the MFW mailbox, compares each slot against the shadow copy
 * from the previous pass, dispatches handlers for changed slots, ACKs
 * the mailbox back to the MFW, then refreshes the shadow.
 *
 * Return: 0 if at least one known message was handled, -EINVAL on an
 * unknown message or a spurious indication.
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	/* NOTE(review): this loop bound is mfw_mb_length while the buffers
	 * hold MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length) dwords - confirm the
	 * units agree before changing either.
	 */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
  518. int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
  519. u32 *p_mfw_ver)
  520. {
  521. struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
  522. struct qed_ptt *p_ptt;
  523. u32 global_offsize;
  524. p_ptt = qed_ptt_acquire(p_hwfn);
  525. if (!p_ptt)
  526. return -EBUSY;
  527. global_offsize = qed_rd(p_hwfn, p_ptt,
  528. SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
  529. public_base,
  530. PUBLIC_GLOBAL));
  531. *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
  532. SECTION_ADDR(global_offsize, 0) +
  533. offsetof(struct public_global, mfw_ver));
  534. qed_ptt_release(p_hwfn, p_ptt);
  535. return 0;
  536. }
  537. int qed_mcp_get_media_type(struct qed_dev *cdev,
  538. u32 *p_media_type)
  539. {
  540. struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
  541. struct qed_ptt *p_ptt;
  542. if (!qed_mcp_is_init(p_hwfn)) {
  543. DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
  544. return -EBUSY;
  545. }
  546. *p_media_type = MEDIA_UNSPECIFIED;
  547. p_ptt = qed_ptt_acquire(p_hwfn);
  548. if (!p_ptt)
  549. return -EBUSY;
  550. *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
  551. offsetof(struct public_port, media_type));
  552. qed_ptt_release(p_hwfn, p_ptt);
  553. return 0;
  554. }
  555. static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
  556. struct qed_ptt *p_ptt,
  557. struct public_func *p_data,
  558. int pfid)
  559. {
  560. u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
  561. PUBLIC_FUNC);
  562. u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
  563. u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
  564. u32 i, size;
  565. memset(p_data, 0, sizeof(*p_data));
  566. size = min_t(u32, sizeof(*p_data),
  567. QED_SECTION_SIZE(mfw_path_offsize));
  568. for (i = 0; i < size / sizeof(u32); i++)
  569. ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
  570. func_addr + (i << 2));
  571. return size;
  572. }
  573. static int
  574. qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
  575. struct public_func *p_info,
  576. enum qed_pci_personality *p_proto)
  577. {
  578. int rc = 0;
  579. switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
  580. case FUNC_MF_CFG_PROTOCOL_ETHERNET:
  581. *p_proto = QED_PCI_ETH;
  582. break;
  583. default:
  584. rc = -EINVAL;
  585. }
  586. return rc;
  587. }
/* Populate mcp_info->func_info from the per-function shmem section.
 *
 * Reads pause configuration, personality, min/max bandwidth (multi-
 * function mode only, clamped to [1, 100]), MAC address, FCoE WWNs and
 * the outer vlan tag.
 *
 * Return: 0 on success, -EINVAL on an unknown personality.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
			       MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	/* Bandwidth limits are only meaningful in multi-function mode */
	if (p_hwfn->cdev->mf_mode != SF) {
		info->bandwidth_min = (shmem_info.config &
				       FUNC_MF_CFG_MIN_BW_MASK) >>
				      FUNC_MF_CFG_MIN_BW_SHIFT;
		if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
			DP_INFO(p_hwfn,
				"bandwidth minimum out of bounds [%02x]. Set to 1\n",
				info->bandwidth_min);
			info->bandwidth_min = 1;
		}

		info->bandwidth_max = (shmem_info.config &
				       FUNC_MF_CFG_MAX_BW_MASK) >>
				      FUNC_MF_CFG_MAX_BW_SHIFT;
		if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
			DP_INFO(p_hwfn,
				"bandwidth maximum out of bounds [%02x]. Set to 100\n",
				info->bandwidth_max);
			info->bandwidth_max = 100;
		}
	}

	/* Rebuild the MAC from the upper/lower shmem words */
	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}
  648. struct qed_mcp_link_params
  649. *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
  650. {
  651. if (!p_hwfn || !p_hwfn->mcp_info)
  652. return NULL;
  653. return &p_hwfn->mcp_info->link_input;
  654. }
  655. struct qed_mcp_link_state
  656. *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
  657. {
  658. if (!p_hwfn || !p_hwfn->mcp_info)
  659. return NULL;
  660. return &p_hwfn->mcp_info->link_output;
  661. }
  662. struct qed_mcp_link_capabilities
  663. *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
  664. {
  665. if (!p_hwfn || !p_hwfn->mcp_info)
  666. return NULL;
  667. return &p_hwfn->mcp_info->link_capabilities;
  668. }
  669. int qed_mcp_drain(struct qed_hwfn *p_hwfn,
  670. struct qed_ptt *p_ptt)
  671. {
  672. u32 resp = 0, param = 0;
  673. int rc;
  674. rc = qed_mcp_cmd(p_hwfn, p_ptt,
  675. DRV_MSG_CODE_NIG_DRAIN, 100,
  676. &resp, &param);
  677. /* Wait for the drain to complete before returning */
  678. msleep(120);
  679. return rc;
  680. }
  681. int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
  682. struct qed_ptt *p_ptt,
  683. u32 *p_flash_size)
  684. {
  685. u32 flash_size;
  686. flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
  687. flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
  688. MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
  689. flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
  690. *p_flash_size = flash_size;
  691. return 0;
  692. }
/* Report the driver version (numeric value plus name string) to the
 * MFW via the SET_VERSION mailbox command.
 *
 * Return: 0 on success, -EBUSY if the MCP is uninitialized, or the
 * qed_mcp_cmd() error on mailbox failure.
 */
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	int rc = 0;
	u32 param = 0, reply = 0, i;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
		  p_ver->version);

	/* Copy version string to shmem */
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
		DRV_MB_WR(p_hwfn, p_ptt,
			  union_data.drv_version.name[i * sizeof(u32)],
			  *(u32 *)&p_ver->name[i * sizeof(u32)]);
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	return 0;
}
  720. int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
  721. enum qed_led_mode mode)
  722. {
  723. u32 resp = 0, param = 0, drv_mb_param;
  724. int rc;
  725. switch (mode) {
  726. case QED_LED_MODE_ON:
  727. drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
  728. break;
  729. case QED_LED_MODE_OFF:
  730. drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
  731. break;
  732. case QED_LED_MODE_RESTORE:
  733. drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
  734. break;
  735. default:
  736. DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
  737. return -EINVAL;
  738. }
  739. rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
  740. drv_mb_param, &resp, &param);
  741. return rc;
  742. }