/* qlge_mpi.c */
// SPDX-License-Identifier: GPL-2.0
#include "qlge.h"

int ql_unpause_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;

	/* Un-pause the RISC */
	tmp = ql_read32(qdev, CSR);
	if (!(tmp & CSR_RP))
		return -EIO;

	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
	return 0;
}

int ql_pause_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;
	int count = UDELAY_COUNT;

	/* Pause the RISC */
	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
	do {
		tmp = ql_read32(qdev, CSR);
		if (tmp & CSR_RP)
			break;
		mdelay(UDELAY_DELAY);
		count--;
	} while (count);
	return (count == 0) ? -ETIMEDOUT : 0;
}

int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;
	int count = UDELAY_COUNT;

	/* Reset the RISC */
	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
	do {
		tmp = ql_read32(qdev, CSR);
		if (tmp & CSR_RR) {
			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
			break;
		}
		mdelay(UDELAY_DELAY);
		count--;
	} while (count);
	return (count == 0) ? -ETIMEDOUT : 0;
}

int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, PROC_DATA);
exit:
	return status;
}

int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* write the data to the data reg */
	ql_write32(qdev, PROC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, PROC_ADDR, reg);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
exit:
	return status;
}

int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
	int status;

	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
	return status;
}

/* Determine if we are in charge of the firmware. We are if
 * we are the lower of the 2 NIC PCIe functions, or if we are
 * the higher function and the lower function is not enabled.
 */
int ql_own_firmware(struct ql_adapter *qdev)
{
	u32 temp;

	/* If we are the lower of the 2 NIC functions
	 * on the chip then we are responsible for
	 * core dump and firmware reset after an error.
	 */
	if (qdev->func < qdev->alt_func)
		return 1;

	/* If we are the higher of the 2 NIC functions
	 * on the chip and the lower function is not
	 * enabled, then we are responsible for
	 * core dump and firmware reset after an error.
	 */
	temp = ql_read32(qdev, STS);
	if (!(temp & (1 << (8 + qdev->alt_func))))
		return 1;

	return 0;
}
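
/* Read the firmware's outbound mailbox registers into
 * mbcp->mbox_out[0..out_count-1]. The caller sets out_count;
 * the processor-register semaphore (SEM_PROC_REG_MASK) is
 * held for the duration of the reads.
 */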
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
	if (status)
		return -EBUSY;
	for (i = 0; i < mbcp->out_count; i++) {
		status = ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
					 &mbcp->mbox_out[i]);
		if (status) {
			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
	return status;
}

/* Wait for a single mailbox command to complete.
 * Returns zero on success.
 */
static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
{
	int count = 100;
	u32 value;

	do {
		value = ql_read32(qdev, STS);
		if (value & STS_PI)
			return 0;
		mdelay(UDELAY_DELAY); /* 100ms */
	} while (--count);
	return -ETIMEDOUT;
}

/* Execute a single mailbox command.
 * Caller must hold PROC_ADDR semaphore.
 */
static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	/*
	 * Make sure there's nothing pending.
	 * This shouldn't happen.
	 */
	if (ql_read32(qdev, CSR) & CSR_HRI)
		return -EIO;

	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
	if (status)
		return status;

	/*
	 * Fill the outbound mailboxes.
	 */
	for (i = 0; i < mbcp->in_count; i++) {
		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
					  mbcp->mbox_in[i]);
		if (status)
			goto end;
	}
	/*
	 * Wake up the MPI firmware.
	 */
	ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
end:
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
	return status;
}

/* We are being asked by firmware to accept
 * a change to the port. This is only
 * a change to max frame sizes (Tx/Rx), pause
 * parameters, or loopback mode. We wake up a worker
 * to handle processing this since a mailbox command
 * will need to be sent to ACK the request.
 */
static int ql_idc_req_aen(struct ql_adapter *qdev)
{
	int status;
	struct mbox_params *mbcp = &qdev->idc_mbc;

	netif_err(qdev, drv, qdev->ndev, "Enter!\n");
	/* Get the status data and start up a thread to
	 * handle the request.
	 */
	mbcp = &qdev->idc_mbc;
	mbcp->out_count = 4;
	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting ASIC!\n");
		ql_queue_asic_error(qdev);
	} else {
		/* Begin polled mode early so
		 * we don't get another interrupt
		 * when we leave mpi_worker.
		 */
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
	}
	return status;
}

/* Process an inter-device event completion.
 * If good, signal the caller's completion.
 */
static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
{
	int status;
	struct mbox_params *mbcp = &qdev->idc_mbc;

	mbcp->out_count = 4;
	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting RISC!\n");
		ql_queue_fw_error(qdev);
	} else {
		/* Wake up the sleeping mpi_idc_work thread that is
		 * waiting for this event.
		 */
		complete(&qdev->ide_completion);
	}
	return status;
}
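
/* Link up AEN. Capture the new link status from mailbox 1,
 * re-initialize the CAM and frame routing if we are coming
 * back from an IDC event, and queue the port-config worker
 * (once) to verify the frame size settings.
 */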
static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 2;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "%s: Could not get mailbox status.\n", __func__);
		return;
	}

	qdev->link_status = mbcp->mbox_out[1];
	netif_err(qdev, drv, qdev->ndev, "Link Up.\n");

	/* If we're coming back from an IDC event
	 * then set up the CAM and frame routing.
	 */
	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
		status = ql_cam_route_initialize(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init CAM/Routing tables.\n");
			return;
		} else {
			clear_bit(QL_CAM_RT_SET, &qdev->flags);
		}
	}

	/* Queue up a worker to check the frame
	 * size information, and fix it if it's not
	 * to our liking.
	 */
	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
		set_bit(QL_PORT_CFG, &qdev->flags);

		/* Begin polled mode early so
		 * we don't get another interrupt
		 * when we leave mpi_worker dpc.
		 */
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_port_cfg_work, 0);
	}

	ql_link_on(qdev);
}

static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 3;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");

	ql_link_off(qdev);
}

static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 5;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
	else
		netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");

	return status;
}

static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 1;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
	else
		netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");

	return status;
}

static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 6;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
	} else {
		int i;

		netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
		for (i = 0; i < mbcp->out_count; i++)
			netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
				  i, mbcp->mbox_out[i]);
	}

	return status;
}
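
/* Firmware-init-done AEN. Record the firmware revision from
 * mailbox 1 and initialize the CAM and routing tables.
 */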
static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 2;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
	} else {
		netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
			  mbcp->mbox_out[1]);
		qdev->fw_rev_id = mbcp->mbox_out[1];
		status = ql_cam_route_initialize(qdev);
		if (status)
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init CAM/Routing tables.\n");
	}
}

/* Process an async event and clear it unless it's an
 * error condition.
 * This can get called iteratively from the mpi_work thread
 * when events arrive via an interrupt.
 * It also gets called when a mailbox command is polling for
 * its completion.
 */
static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;
	int orig_count = mbcp->out_count;

	/* Just get mailbox zero for now. */
	mbcp->out_count = 1;
	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting ASIC!\n");
		ql_queue_asic_error(qdev);
		goto end;
	}

	switch (mbcp->mbox_out[0]) {

	/* This case is only active when we arrive here
	 * as a result of issuing a mailbox command to
	 * the firmware.
	 */
	case MB_CMD_STS_INTRMDT:
	case MB_CMD_STS_GOOD:
	case MB_CMD_STS_INVLD_CMD:
	case MB_CMD_STS_XFC_ERR:
	case MB_CMD_STS_CSUM_ERR:
	case MB_CMD_STS_ERR:
	case MB_CMD_STS_PARAM_ERR:
		/* We can only get mailbox status if we're polling from an
		 * unfinished command. Get the rest of the status data and
		 * return it to the caller.
		 * We only end up here when we're polling for a mailbox
		 * command completion.
		 */
		mbcp->out_count = orig_count;
		status = ql_get_mb_sts(qdev, mbcp);
		return status;

	/* We are being asked by firmware to accept
	 * a change to the port. This is only
	 * a change to max frame sizes (Tx/Rx), pause
	 * parameters, or loopback mode.
	 */
	case AEN_IDC_REQ:
		status = ql_idc_req_aen(qdev);
		break;

	/* Process an inbound IDC event.
	 * This will happen when we're trying to
	 * change tx/rx max frame size, change pause
	 * parameters or loopback mode.
	 */
	case AEN_IDC_CMPLT:
	case AEN_IDC_EXT:
		status = ql_idc_cmplt_aen(qdev);
		break;

	case AEN_LINK_UP:
		ql_link_up(qdev, mbcp);
		break;

	case AEN_LINK_DOWN:
		ql_link_down(qdev, mbcp);
		break;

	case AEN_FW_INIT_DONE:
		/* If we're in the process of executing the firmware,
		 * then convert the status to normal mailbox status.
		 */
		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
			mbcp->out_count = orig_count;
			status = ql_get_mb_sts(qdev, mbcp);
			mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
			return status;
		}
		ql_init_fw_done(qdev, mbcp);
		break;

	case AEN_AEN_SFP_IN:
		ql_sfp_in(qdev, mbcp);
		break;

	case AEN_AEN_SFP_OUT:
		ql_sfp_out(qdev, mbcp);
		break;

	/* This event can arrive at boot time or after an
	 * MPI reset if the firmware failed to initialize.
	 */
	case AEN_FW_INIT_FAIL:
		/* If we're in the process of executing the firmware,
		 * then convert the status to normal mailbox status.
		 */
		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
			mbcp->out_count = orig_count;
			status = ql_get_mb_sts(qdev, mbcp);
			mbcp->mbox_out[0] = MB_CMD_STS_ERR;
			return status;
		}
		netif_err(qdev, drv, qdev->ndev,
			  "Firmware initialization failed.\n");
		status = -EIO;
		ql_queue_fw_error(qdev);
		break;

	case AEN_SYS_ERR:
		netif_err(qdev, drv, qdev->ndev, "System Error.\n");
		ql_queue_fw_error(qdev);
		status = -EIO;
		break;

	case AEN_AEN_LOST:
		ql_aen_lost(qdev, mbcp);
		break;

	case AEN_DCBX_CHG:
		/* Need to support AEN 8110 */
		break;

	default:
		netif_err(qdev, drv, qdev->ndev,
			  "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
		/* Clear the MPI firmware status. */
	}
end:
	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
	/* Restore the original mailbox count to
	 * what the caller asked for. This can get
	 * changed when a mailbox command is waiting
	 * for a response and an AEN arrives and
	 * is handled.
	 */
	mbcp->out_count = orig_count;
	return status;
}

/* Execute a single mailbox command.
 * mbcp is a pointer to an array of u32. Each
 * element in the array contains the value for its
 * respective mailbox register.
 */
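/* Note: the completion handshake below compares only bits 15:12
 * of mailbox 0 (the 0x0000f000 mask) against MB_CMD_STS_GOOD and
 * MB_CMD_STS_INTRMDT.
 */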
static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;
	unsigned long count;

	mutex_lock(&qdev->mpi_mutex);

	/* Begin polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));

	/* Load the mailbox registers and wake up MPI RISC. */
	status = ql_exec_mb_cmd(qdev, mbcp);
	if (status)
		goto end;

	/* If we're generating a system error, then there's nothing
	 * to wait for.
	 */
	if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
		goto end;

	/* Wait for the command to complete. We loop
	 * here because some AEN might arrive while
	 * we're waiting for the mailbox command to
	 * complete. If more than 5 seconds expire we can
	 * assume something is wrong.
	 */
	count = jiffies + HZ * MAILBOX_TIMEOUT;
	do {
		/* Wait for the interrupt to come in. */
		status = ql_wait_mbx_cmd_cmplt(qdev);
		if (status)
			continue;

		/* Process the event. If it's an AEN, it
		 * will be handled in-line or a worker
		 * will be spawned. If it's our completion
		 * we will catch it below.
		 */
		status = ql_mpi_handler(qdev, mbcp);
		if (status)
			goto end;

		/* It's either the completion of our mailbox
		 * command or an AEN. If it's our completion
		 * then get out.
		 */
		if (((mbcp->mbox_out[0] & 0x0000f000) ==
		     MB_CMD_STS_GOOD) ||
		    ((mbcp->mbox_out[0] & 0x0000f000) ==
		     MB_CMD_STS_INTRMDT))
			goto done;
	} while (time_before(jiffies, count));

	netif_err(qdev, drv, qdev->ndev,
		  "Timed out waiting for mailbox complete.\n");
	status = -ETIMEDOUT;
	goto end;

done:
	/* Now we can clear the interrupt condition
	 * and look at our status.
	 */
	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);

	if (((mbcp->mbox_out[0] & 0x0000f000) !=
	     MB_CMD_STS_GOOD) &&
	    ((mbcp->mbox_out[0] & 0x0000f000) !=
	     MB_CMD_STS_INTRMDT)) {
		status = -EIO;
	}
end:
	/* End polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);
	return status;
}

/* Get MPI firmware version. This will be used for
 * driver banner and for ethtool info.
 * Returns zero on success.
 */
int ql_mb_about_fw(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 3;

	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed about firmware command\n");
		status = -EIO;
	}

	/* Store the firmware version */
	qdev->fw_rev_id = mbcp->mbox_out[1];

	return status;
}

/* Get functional state for MPI firmware.
 * Returns zero on success.
 */
int ql_mb_get_fw_state(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 2;

	mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Get Firmware State.\n");
		status = -EIO;
	}

	/* If bit zero is set in mbx 1 then the firmware is
	 * running, but not initialized. This should never
	 * happen.
	 */
	if (mbcp->mbox_out[1] & 1) {
		netif_err(qdev, drv, qdev->ndev,
			  "Firmware waiting for initialization.\n");
		status = -EIO;
	}

	return status;
}

/* Send an ACK mailbox command to the firmware to
 * let it continue with the change.
 */
static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 5;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
	mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
	mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
	mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
	mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
		status = -EIO;
	}
	return status;
}

/* Set the link settings and maximum frame size
 * for the current port.
 * Most likely will block.
 */
int ql_mb_set_port_cfg(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 3;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
	mbcp->mbox_in[1] = qdev->link_config;
	mbcp->mbox_in[2] = qdev->max_frame_size;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
		netif_err(qdev, drv, qdev->ndev,
			  "Port Config sent, wait for IDC.\n");
	} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Set Port Configuration.\n");
		status = -EIO;
	}
	return status;
}
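
/* Ask the firmware to copy a region of RISC RAM to the host
 * buffer at req_dma. size is a count of 32-bit words; the RAM
 * address, size and 64-bit DMA address are split across
 * mailboxes 1-8.
 */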
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
			  u32 size)
{
	int status = 0;
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 9;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
	mbcp->mbox_in[1] = LSW(addr);
	mbcp->mbox_in[2] = MSW(req_dma);
	mbcp->mbox_in[3] = LSW(req_dma);
	mbcp->mbox_in[4] = MSW(size);
	mbcp->mbox_in[5] = LSW(size);
	mbcp->mbox_in[6] = MSW(MSD(req_dma));
	mbcp->mbox_in[7] = LSW(MSD(req_dma));
	mbcp->mbox_in[8] = MSW(addr);

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
		status = -EIO;
	}
	return status;
}

/* Issue a mailbox command to dump RISC RAM. */
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
			  u32 ram_addr, int word_count)
{
	int status;
	char *my_buf;
	dma_addr_t buf_dma;

	my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
				      &buf_dma);
	if (!my_buf)
		return -EIO;

	status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
	if (!status)
		memcpy(buf, my_buf, word_count * sizeof(u32));

	pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
			    buf_dma);
	return status;
}

/* Get link settings and maximum frame size settings
 * for the current port.
 * Most likely will block.
 */
int ql_mb_get_port_cfg(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 3;

	mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Get Port Configuration.\n");
		status = -EIO;
	} else {
		netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
			     "Passed Get Port Configuration.\n");
		qdev->link_config = mbcp->mbox_out[1];
		qdev->max_frame_size = mbcp->mbox_out[2];
	}
	return status;
}

int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 2;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
	mbcp->mbox_in[1] = wol;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
		status = -EIO;
	}
	return status;
}
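
/* Enable or disable magic-packet Wake-on-LAN. When enabling,
 * the port's MAC address is handed to the firmware one byte
 * per mailbox register; otherwise a clearing pattern is sent.
 */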
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;
	u8 *addr = qdev->ndev->dev_addr;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 8;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
	if (enable_wol) {
		mbcp->mbox_in[1] = (u32)addr[0];
		mbcp->mbox_in[2] = (u32)addr[1];
		mbcp->mbox_in[3] = (u32)addr[2];
		mbcp->mbox_in[4] = (u32)addr[3];
		mbcp->mbox_in[5] = (u32)addr[4];
		mbcp->mbox_in[6] = (u32)addr[5];
		mbcp->mbox_in[7] = 0;
	} else {
		mbcp->mbox_in[1] = 0;
		mbcp->mbox_in[2] = 1;
		mbcp->mbox_in[3] = 1;
		mbcp->mbox_in[4] = 1;
		mbcp->mbox_in[5] = 1;
		mbcp->mbox_in[6] = 1;
		mbcp->mbox_in[7] = 0;
	}

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
		status = -EIO;
	}
	return status;
}

/* IDC - Inter Device Communication...
 * Some firmware commands require consent of the adjacent FCoE
 * function. This function waits for the OK, or a
 * counter-request for a little more time.
 * The firmware will complete the request if the other
 * function doesn't respond.
 */
static int ql_idc_wait(struct ql_adapter *qdev)
{
	int status = -ETIMEDOUT;
	long wait_time = 1 * HZ;
	struct mbox_params *mbcp = &qdev->idc_mbc;

	do {
		/* Wait here for the command to complete
		 * via the IDC process.
		 */
		wait_time =
			wait_for_completion_timeout(&qdev->ide_completion,
						    wait_time);
		if (!wait_time) {
			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
			break;
		}
		/* Now examine the response from the IDC process.
		 * We might have a good completion or a request for
		 * more wait time.
		 */
		if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
			netif_err(qdev, drv, qdev->ndev,
				  "IDC Time Extension from function.\n");
			wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
		} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
			netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
			status = 0;
			break;
		} else {
			netif_err(qdev, drv, qdev->ndev,
				  "IDC: Invalid State 0x%.04x.\n",
				  mbcp->mbox_out[0]);
			status = -EIO;
			break;
		}
	} while (wait_time);

	return status;
}

int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 2;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
	mbcp->mbox_in[1] = led_config;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to set LED Configuration.\n");
		status = -EIO;
	}

	return status;
}

int ql_mb_get_led_cfg(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 2;

	mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to get LED Configuration.\n");
		status = -EIO;
	} else {
		qdev->led_config = mbcp->mbox_out[1];
	}

	return status;
}
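
/* Set the MPI management traffic control state. A status of
 * MB_CMD_STS_ERR from the firmware is not treated as a failure;
 * it means the firmware is already in the requested state.
 */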
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 2;

	mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
	mbcp->mbox_in[1] = control;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Command not supported by firmware.\n");
		status = -EINVAL;
	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
		/* This indicates that the firmware is
		 * already in the state we are trying to
		 * change it to.
		 */
		netif_err(qdev, drv, qdev->ndev,
			  "Command parameters make no change.\n");
	}
	return status;
}

/* Returns a negative error code or the mailbox command status. */
static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));
	*control = 0;

	mbcp->in_count = 1;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
		*control = mbcp->mbox_in[1];
		return status;
	}

	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Command not supported by firmware.\n");
		status = -EINVAL;
	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to get MPI traffic control.\n");
		status = -EIO;
	}
	return status;
}
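
/* Poll until both the NIC FIFO (STS_NFE) and the MPI management
 * FIFO report empty, retrying a few times with 100 ms sleeps.
 */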
int ql_wait_fifo_empty(struct ql_adapter *qdev)
{
	int count = 5;
	u32 mgmnt_fifo_empty;
	u32 nic_fifo_empty;

	do {
		nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
		ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
		mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
		if (nic_fifo_empty && mgmnt_fifo_empty)
			return 0;
		msleep(100);
	} while (count-- > 0);
	return -ETIMEDOUT;
}

/* API called in work thread context to set new TX/RX
 * maximum frame size values to match MTU.
 */
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
	int status;

	status = ql_mb_set_port_cfg(qdev);
	if (status)
		return status;
	status = ql_idc_wait(qdev);
	return status;
}

/* The following routines are worker threads that process
 * events that may sleep waiting for completion.
 */

/* This thread gets the maximum TX and RX frame size values
 * from the firmware and, if necessary, changes them to match
 * the MTU setting.
 */
void ql_mpi_port_cfg_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
	int status;

	status = ql_mb_get_port_cfg(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Failed to get port config data.\n");
		goto err;
	}

	if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
	    qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
		goto end;

	qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
	qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
	status = ql_set_port_cfg(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Failed to set port config data.\n");
		goto err;
	}
end:
	clear_bit(QL_PORT_CFG, &qdev->flags);
	return;
err:
	ql_queue_fw_error(qdev);
	goto end;
}

/* Process an inter-device request. This is issued by
 * the firmware in response to another function requesting
 * a change to the port. We set a flag to indicate a change
 * has been made and then send a mailbox command ACKing
 * the change request.
 */
void ql_mpi_idc_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, mpi_idc_work.work);
	int status;
	struct mbox_params *mbcp = &qdev->idc_mbc;
	u32 aen;
	int timeout;

	aen = mbcp->mbox_out[1] >> 16;
	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;

	switch (aen) {
	default:
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Unhandled IDC action.\n");
		break;
	case MB_CMD_PORT_RESET:
	case MB_CMD_STOP_FW:
		ql_link_off(qdev);
		/* Fall through. */
	case MB_CMD_SET_PORT_CFG:
		/* Signal the resulting link up AEN
		 * that the frame routing and mac addr
		 * need to be set.
		 */
		set_bit(QL_CAM_RT_SET, &qdev->flags);
		/* Do ACK if required */
		if (timeout) {
			status = ql_mb_idc_ack(qdev);
			if (status)
				netif_err(qdev, drv, qdev->ndev,
					  "Bug: No pending IDC!\n");
		} else {
			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
				     "IDC ACK not required\n");
			status = 0; /* success */
		}
		break;

	/* These sub-commands issued by another (FCoE)
	 * function are requesting to do an operation
	 * on the shared resource (MPI environment).
	 * We currently don't issue these so we just
	 * ACK the request.
	 */
	case MB_CMD_IOP_RESTART_MPI:
	case MB_CMD_IOP_PREP_LINK_DOWN:
		/* Drop the link, reload the routing
		 * table when link comes up.
		 */
		ql_link_off(qdev);
		set_bit(QL_CAM_RT_SET, &qdev->flags);
		/* Fall through. */
	case MB_CMD_IOP_DVR_START:
	case MB_CMD_IOP_FLASH_ACC:
	case MB_CMD_IOP_CORE_DUMP_MPI:
	case MB_CMD_IOP_PREP_UPDATE_MPI:
	case MB_CMD_IOP_COMP_UPDATE_MPI:
	case MB_CMD_IOP_NONE:	/* an IDC without params */
		/* Do ACK if required */
		if (timeout) {
			status = ql_mb_idc_ack(qdev);
			if (status)
				netif_err(qdev, drv, qdev->ndev,
					  "Bug: No pending IDC!\n");
		} else {
			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
				     "IDC ACK not required\n");
			status = 0; /* success */
		}
		break;
	}
}
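
/* MPI interrupt work. While the firmware still has an event
 * pending (STS_PI set), handle it in polled mode; then restore
 * the MPI interrupt mask and re-enable completion interrupt 0.
 */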
void ql_mpi_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, mpi_work.work);
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int err = 0;

	mutex_lock(&qdev->mpi_mutex);
	/* Begin polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));

	while (ql_read32(qdev, STS) & STS_PI) {
		memset(mbcp, 0, sizeof(struct mbox_params));
		mbcp->out_count = 1;
		/* Don't continue if an async event
		 * did not complete properly.
		 */
		err = ql_mpi_handler(qdev, mbcp);
		if (err)
			break;
	}

	/* End polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);
	ql_enable_completion_interrupt(qdev, 0);
}
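
/* Firmware/MPI reset work. Cancel the other MPI workers, capture
 * a core dump if this function owns the firmware, then soft-reset
 * the MPI RISC.
 */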
void ql_mpi_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, mpi_reset_work.work);

	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);

	/* If we're not the dominant NIC function,
	 * then there is nothing to do.
	 */
	if (!ql_own_firmware(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
		return;
	}

	if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
		netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
		qdev->core_is_dumped = 1;
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_core_to_log, 5 * HZ);
	}
	ql_soft_reset_mpi_risc(qdev);
}