/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)	  \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr,	  \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT	17

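/* True only once qed_mcp_cmd_init() has allocated mcp_info and
 * qed_load_mcp_offsets() has read a non-zero shmem public base.
 */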
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

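/* Cache the address of this PF's port section within the shmem PUBLIC_PORT
 * area; port_addr is later used for the link, transceiver and media-type
 * reads.
 */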
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

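/* Snapshot the MFW mailbox into mfw_mb_cur. The first dword of the mailbox
 * section holds its length, hence the sizeof(u32) offset below; the data
 * itself is stored big-endian in shmem.
 */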
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
	}
	kfree(p_hwfn->mcp_info);

	return 0;
}

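/* Discover the shmem layout: read the public base from the MCP scratchpad,
 * derive the per-PF driver and MFW mailbox addresses, and sample the initial
 * driver/pulse sequence numbers as well as the MCP generic POR counter that
 * qed_do_mcp_cmd() later uses to detect an intervening MCP reset.
 */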
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	/* Both mailbox snapshot buffers must have been allocated */
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->lock);

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes any
 * other competing context that tries to send a mailbox command to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}

static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
}

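/* Request an MCP reset and poll the generic POR counter until its value
 * changes, which indicates the MCP has actually gone through its reset flow.
 */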
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 iterations of 10us) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

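/* Low-level mailbox transaction: write the parameter and (cmd | seq) into
 * the driver mailbox, then poll the FW mailbox header until the firmware
 * echoes the same sequence number (or the ~5 sec retry budget runs out).
 * On success the FW response code and parameter are returned to the caller.
 */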
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 sec (500 * 1000 iterations of 10us) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}

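/* Mailbox transaction that also moves the union_data area of the driver
 * mailbox: an optional source buffer is copied into shmem before the command
 * is issued, and an optional destination buffer is read back afterwards, all
 * under the mailbox lock.
 */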
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	int rc;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc)
		return rc;

	if (p_mb_params->p_data_src != NULL)
		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
			      p_mb_params->p_data_src,
			      sizeof(*p_mb_params->p_data_src));

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			    p_mb_params->param, &p_mb_params->mcp_resp,
			    &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != NULL)
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr,
				sizeof(*p_mb_params->p_data_dst));

	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

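/* Convenience wrapper for commands that carry no union_data payload.
 * A minimal usage sketch (the values are illustrative only):
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			 &resp, &param);
 */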
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, &union_data.raw_data, *o_txn_size);

	return 0;
}

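/* Send a LOAD_REQ to the MFW, carrying the driver version string. The MFW
 * response encodes the role granted to this function in the load flow, or
 * one of the refusal codes handled below.
 */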
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	/* Load Request */
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type;
	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If the MFW refused the load request we must abort. This can happen
	 * in the following cases:
	 * - The other port is in diagnostic mode.
	 * - A previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}

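/* Read the per-path bitmap of VFs the MFW has marked as FLR-ed and hand it
 * to the IOV code, which schedules the actual cleanup from a workqueue.
 */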
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

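/* Translate a link-status update from shmem into the cached
 * qed_mcp_link_state: speed/duplex, partner abilities and flow control.
 * The min/max bandwidth limits are re-applied here as well, since the
 * guaranteed and maximal rates depend on the current line speed.
 */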
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
}

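/* Build an eth_phy_cfg from the cached link_input parameters and send it to
 * the MFW with INIT_PHY (link up) or LINK_RESET (link down).
 */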
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	phy_cfg = &union_data.drv_phy_cfg;
	memset(phy_cfg, 0, sizeof(*phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg->speed = params->speed.forced_speed;
	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg->adv_speed = params->speed.advertised_speeds;
	phy_cfg->loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg->speed,
			   phy_cfg->pause,
			   phy_cfg->adv_speed,
			   phy_cfg->loopback_mode,
			   phy_cfg->feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	memcpy(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));

	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

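/* Dispatcher for asynchronous MFW notifications: compares the fresh mailbox
 * snapshot against the shadow copy, handles every dword that changed, and
 * then writes the acknowledgment back in big-endian, as the MFW expects.
 */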
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects the answer in BE, so force the write in that
		 * format.
		 */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						     public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (test_bit(QED_DEV_CAP_ROCE,
			     &p_hwfn->hw_info.device_capabilities))
			*p_proto = QED_PCI_ETH_ROCE;
		else
			*p_proto = QED_PCI_ETH;
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		rc = -EINVAL;
		break;
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

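/* The NVM_CFG4 field encodes the flash size as a power of two in Mbit units
 * (1 Mbit = 1 << MCP_BYTES_PER_MBIT_SHIFT bytes); e.g. a field value of 3
 * yields 1 << (3 + 17) = 1 MB.
 */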
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

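/* In CMT mode there are two HW functions behind a single PCI function, and
 * the MFW presumably accounts for status blocks per device rather than per
 * hwfn, so the per-VF request below is scaled by num_hwfns.
 */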
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only the leader can configure MSIX, and needs to take CMT into
	 * account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	/* Copy the version string one dword at a time, swapping each dword
	 * to BE for shmem; the loop index counts dwords, so the byte offset
	 * is i * sizeof(u32).
	 */
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

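/* Releasing a halted MCP appears to require both clearing CPU_STATE and
 * dropping the soft-halt bit in CPU_MODE; the bit is read back to verify
 * that the MCP actually resumed.
 */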
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}