bfa_ioc_ct.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944
  1. /*
  2. * Linux network driver for QLogic BR-series Converged Network Adapter.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License (GPL) Version 2 as
  6. * published by the Free Software Foundation
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. /*
  14. * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  15. * Copyright (c) 2014-2015 QLogic Corporation
  16. * All rights reserved
  17. * www.qlogic.com
  18. */
  19. #include "bfa_ioc.h"
  20. #include "cna.h"
  21. #include "bfi.h"
  22. #include "bfi_reg.h"
  23. #include "bfa_defs.h"
/*
 * The ioc_fail_sync register is split into two 16-bit halves: the low
 * half holds per-PCI-function "sync acked" bits, the high half the
 * corresponding "sync required" bits.  Each function's bit position is
 * its PCI function number.
 */
#define bfa_ioc_ct_sync_pos(__ioc)	BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)		(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
  31. /*
  32. * forward declarations
  33. */
  34. static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
  35. static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
  36. static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
  37. static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
  38. static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
  39. static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
  40. static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
  41. static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
  42. static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
  43. static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
  44. static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
  45. static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
  46. static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
  47. static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
  48. static void bfa_ioc_ct_set_cur_ioc_fwstate(
  49. struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
  50. static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
  51. static void bfa_ioc_ct_set_alt_ioc_fwstate(
  52. struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
  53. static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
  54. static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
  55. enum bfi_asic_mode asic_mode);
  56. static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
  57. enum bfi_asic_mode asic_mode);
  58. static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
/*
 * Hardware interface ops for the CT (Catapult) ASIC; installed by
 * bfa_nw_ioc_set_ct_hwif().
 */
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct_reg_init,
	.ioc_map_port = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/*
 * Hardware interface ops for the CT2 ASIC; installed by
 * bfa_nw_ioc_set_ct2_hwif().  Differs from CT in pll/reg/port-map init,
 * provides an lpu_read_stat op, and has no isr_mode_set.
 */
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct2_reg_init,
	.ioc_map_port = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set = NULL,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}
/* Install the CT2 asic specific hardware interface ops. */
void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
  109. /* Return true if firmware of current driver matches the running firmware. */
  110. static bool
  111. bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
  112. {
  113. enum bfi_ioc_state ioc_fwstate;
  114. u32 usecnt;
  115. struct bfi_ioc_image_hdr fwhdr;
  116. /**
  117. * If bios boot (flash based) -- do not increment usage count
  118. */
  119. if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
  120. BFA_IOC_FWIMG_MINSZ)
  121. return true;
  122. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  123. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  124. /**
  125. * If usage count is 0, always return TRUE.
  126. */
  127. if (usecnt == 0) {
  128. writel(1, ioc->ioc_regs.ioc_usage_reg);
  129. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  130. writel(0, ioc->ioc_regs.ioc_fail_sync);
  131. return true;
  132. }
  133. ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  134. /**
  135. * Use count cannot be non-zero and chip in uninitialized state.
  136. */
  137. BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
  138. /**
  139. * Check if another driver with a different firmware is active
  140. */
  141. bfa_nw_ioc_fwver_get(ioc, &fwhdr);
  142. if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
  143. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  144. return false;
  145. }
  146. /**
  147. * Same firmware version. Increment the reference count.
  148. */
  149. usecnt++;
  150. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  151. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  152. return true;
  153. }
  154. static void
  155. bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
  156. {
  157. u32 usecnt;
  158. /**
  159. * If bios boot (flash based) -- do not decrement usage count
  160. */
  161. if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
  162. BFA_IOC_FWIMG_MINSZ)
  163. return;
  164. /**
  165. * decrement usage count
  166. */
  167. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  168. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  169. BUG_ON(!(usecnt > 0));
  170. usecnt--;
  171. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  172. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  173. }
/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	/* Halt both this port's and the other port's LL. */
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect (readback flushes the posted writes) */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}
/*
 * Host to LPU mailbox message addresses, indexed by PCI function number
 * (see bfa_ioc_ct_reg_init()).
 */
static const struct {
	u32 hfn_mbox;
	u32 lpu_mbox;
	u32 hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/*
 * Host <-> LPU mailbox command/status registers - port 0,
 * indexed by PCI function number.
 */
static const struct {
	u32 hfn;
	u32 lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/*
 * Host <-> LPU mailbox command/status registers - port 1,
 * indexed by PCI function number.
 */
static const struct {
	u32 hfn;
	u32 lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
/*
 * CT2 mailbox and command/status register addresses, indexed by port id
 * (see bfa_ioc_ct2_reg_init()).
 */
static const struct {
	u32 hfn_mbox;
	u32 lpu_mbox;
	u32 hfn_pgn;
	u32 hfn;
	u32 lpu;
	u32 lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
/*
 * Initialize ioc->ioc_regs with the CT ASIC register addresses for this
 * PCI function and port.  Mailbox registers are selected by PCI function
 * number; heartbeat/fwstate/halt registers by port id, with the "alt"
 * entries pointing at the other port's registers.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Initialize ioc->ioc_regs with the CT2 ASIC register addresses.  Unlike
 * CT, the mailbox registers are indexed by port id rather than PCI
 * function; the "alt" entries point at the other port's registers.
 */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
/* Initialize IOC to port mapping. */

/* Each PCI function owns an 8-bit field in the personality register. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)

static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}
/* Initialize CT2 IOC to port mapping from the personality register. */
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/* Port id comes from the FC link-layer port-map field. */
	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	/* Extract this function's interrupt-mode bits from the
	 * personality register. */
	r32 = readl(rb + FNC_PERS_REG);
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* Replace only this function's mode field and write it back. */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	writel(r32, rb + FNC_PERS_REG);
}
  375. static bool
  376. bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
  377. {
  378. u32 r32;
  379. r32 = readl(ioc->ioc_regs.lpu_read_stat);
  380. if (r32) {
  381. writel(1, ioc->ioc_regs.lpu_read_stat);
  382. return true;
  383. }
  384. return false;
  385. }
/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff

void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	/* Vector count already programmed: only set the mbox-error
	 * vector index from the existing offset field. */
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	/* Allocate HOSTFN_MSIX_DEFAULT vectors per PCI function. */
	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	/* Force the shared firmware usage count back to zero. */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
/*
 * Synchronized IOC failure processing routines.
 * Return true if this function may proceed with initialization.
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		/* Reset sync state, usage count and both fwstate regs. */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
  447. /* Synchronized IOC failure processing routines */
  448. static void
  449. bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
  450. {
  451. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  452. u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  453. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  454. }
  455. static void
  456. bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
  457. {
  458. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  459. u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  460. bfa_ioc_ct_sync_pos(ioc);
  461. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  462. }
  463. static void
  464. bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
  465. {
  466. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  467. writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
  468. }
/*
 * Return true when failure sync is complete: either no acks are
 * outstanding, or every function that required sync has acked — in
 * which case the sync state is cleared and both fwstate registers are
 * set to BFI_IOC_FAIL.
 */
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
/* Write this IOC's firmware state register. */
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

/* Read this IOC's firmware state register. */
static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

/* Write the alternate (other port's) firmware state register. */
static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

/* Read the alternate (other port's) firmware state register. */
static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}
/*
 * CT ASIC PLL initialization: select the operating mode (FC vs.
 * FCoE/Ethernet), program and start the SCLK/LCLK PLLs, and run the
 * eDRAM memory BIST.  Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	/* Select operating mode and MAC serdes configuration. */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}

	/* Both IOCs start out uninitialized. */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* Mask and clear all interrupts on both host functions
	 * (mask is written again after clearing status). */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* Program the PLLs while holding logic soft reset... */
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	/* ...then enable them, still held in logic soft reset. */
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	/* Flush posted writes, then delay 2ms. */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* Release the logic soft reset, leaving the PLLs enabled. */
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Take local memory out of reset. */
	r32 = readl(rb + PSS_CTL_REG);
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Run the eDRAM BIST; NOTE(review): the status register is read
	 * but its result is not checked here. */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl(rb + MBIST_STAT_REG);
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
/*
 * Program the CT2 s_clk PLL.  The PLL is left in logic soft reset;
 * callers release the reset afterwards (see bfa_ioc_ct2_mac_reset()
 * and bfa_ioc_ct2_pll_init()).
 */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel(r32 | __ETH_CLK_ENABLE_PORT0,
		rb + CT2_CHIP_MISC_PRG);
	r32 = readl(rb + CT2_PCIE_MISC_REG);
	writel(r32 | __ETH_CLK_ENABLE_PORT1,
		rb + CT2_PCIE_MISC_REG);

	/*
	 * set sclk value
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Dont do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
/*
 * Program the CT2 l_clk PLL (settings for FC16, which the comments say
 * also work for other modes).  The PLL is left in logic soft reset;
 * callers release the reset afterwards.
 */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 * NOTE(review): r32 is written back unchanged here — the
	 * register's existing value is deliberately kept.
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 * NOTE(review): also a write-back of the unchanged value.
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
/* Take local memory out of reset and run the eDRAM memory BIST. */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + PSS_CTL_REG);
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, rb + PSS_CTL_REG);
	udelay(1000);

	/* Start the BIST, give it 1ms, then stop it. */
	writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
	udelay(1000);
	writel(0, rb + CT2_MBIST_CTL_REG);
}
/*
 * Reinitialize the s_clk/l_clk PLLs, release their logic soft resets,
 * and put both port MACs (and their AHB interfaces) in reset.
 */
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + CT2_APP_PLL_LCLK_CTL_REG);

	/* put port0, port1 MAC & AHB in reset */
	writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}
  712. #define CT2_NFC_MAX_DELAY 1000
  713. #define CT2_NFC_VER_VALID 0x143
  714. #define BFA_IOC_PLL_POLL 1000000
  715. static bool
  716. bfa_ioc_ct2_nfc_halted(void __iomem *rb)
  717. {
  718. volatile u32 r32;
  719. r32 = readl(rb + CT2_NFC_CSR_SET_REG);
  720. if (r32 & __NFC_CONTROLLER_HALTED)
  721. return true;
  722. return false;
  723. }
/*
 * Clear the NFC halt and poll (up to CT2_NFC_MAX_DELAY iterations of
 * 1ms) until the controller leaves the halted state; BUG if it never
 * does.
 */
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	volatile u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}
/*
 * CT2 ASIC PLL initialization.  When valid, recent NFC firmware is
 * present (per CT2_WGN_STATUS/CT2_RSC_GPR15_REG), request the PLL
 * restart through the CSI firmware control register; otherwise halt the
 * NFC controller and initialize the PLLs/MACs directly.  Then clear any
 * pending mailbox interrupts, run the memory BIST, and set both IOC
 * fwstate registers to BFI_IOC_UNINIT.  Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);
	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
			nfc_ver >= CT2_NFC_VER_VALID) {
		/* NFC firmware handles the PLL bring-up. */
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
			rb + CT2_CSI_FW_CTL_SET_REG);

		/* Wait for the request bit to appear... */
		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		/* ...then for it to be cleared again. */
		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		/* No usable NFC: halt it and do the PLL init ourselves. */
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl(rb + PSS_GPIO_OUT_REG);
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl(rb + PSS_GPIO_OE_REG);
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
	writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
		if (r32 == 1) {
			writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
			readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
		}
		r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
		if (r32 == 1) {
			writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
			readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	/* Both IOCs start out uninitialized. */
	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
	return BFA_STATUS_OK;
}