/* qplib_rcfw.c */
  1. /*
  2. * Broadcom NetXtreme-E RoCE driver.
  3. *
  4. * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
  5. * Broadcom refers to Broadcom Limited and/or its subsidiaries.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. *
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33. * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34. * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35. *
  36. * Description: RDMA Controller HW interface
  37. */
  38. #include <linux/interrupt.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/pci.h>
  41. #include <linux/prefetch.h>
  42. #include "roce_hsi.h"
  43. #include "qplib_res.h"
  44. #include "qplib_rcfw.h"
  45. static void bnxt_qplib_service_creq(unsigned long data);
  46. /* Hardware communication channel */
  47. int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
  48. {
  49. u16 cbit;
  50. int rc;
  51. cookie &= RCFW_MAX_COOKIE_VALUE;
  52. cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
  53. if (!test_bit(cbit, rcfw->cmdq_bitmap))
  54. dev_warn(&rcfw->pdev->dev,
  55. "QPLIB: CMD bit %d for cookie 0x%x is not set?",
  56. cbit, cookie);
  57. rc = wait_event_timeout(rcfw->waitq,
  58. !test_bit(cbit, rcfw->cmdq_bitmap),
  59. msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
  60. if (!rc) {
  61. dev_warn(&rcfw->pdev->dev,
  62. "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
  63. RCFW_CMD_WAIT_TIME_MS, cookie);
  64. }
  65. return rc;
  66. };
  67. int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
  68. {
  69. u32 count = -1;
  70. u16 cbit;
  71. cookie &= RCFW_MAX_COOKIE_VALUE;
  72. cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
  73. if (!test_bit(cbit, rcfw->cmdq_bitmap))
  74. goto done;
  75. do {
  76. bnxt_qplib_service_creq((unsigned long)rcfw);
  77. } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
  78. done:
  79. return count;
  80. };
/* bnxt_qplib_rcfw_send_message() - post an RCFW command on the CMDQ and
 * ring the FW doorbell.
 * @rcfw:	per-device RCFW channel state
 * @req:	the command; req->cmd_size is in 16-byte cmdqe units
 * @crsbe:	if non-NULL and req->resp_size is set, a response-buffer
 *		slot is reserved and its kernel address returned through this
 * @is_block:	tags the cookie as blocking so the completion path skips
 *		the waitqueue wakeup (caller polls via ..._block_for_resp)
 *
 * Returns a pointer to the per-command completion area (&crsqe->qp_event)
 * that the CREQ handler later fills in, or NULL if the command could not
 * be posted.  The caller must subsequently wait or poll on the cookie.
 */
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				   struct cmdq_base *req, void **crsbe,
				   u8 is_block)
{
	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
	struct bnxt_qplib_crsqe *crsqe = NULL;
	struct bnxt_qplib_crsbe **crsb_ptr;
	u32 sw_prod, cmdq_prod;
	u8 retry_cnt = 0xFF;
	dma_addr_t dma_addr;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	int pg, idx;
	u8 *preq;

retry:
	/* Only QUERY_FUNC / INITIALIZE_FW are legal before FW init */
	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return NULL;
	}
	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return NULL;
	}

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	/* Free-slot check: prod-cons distance masked to the ring size */
	if (req->cmd_size > cmdq->max_elements -
	    ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
	     (cmdq->max_elements - 1))) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		/* Retry (up to 255 times) after letting completions drain */
		if (!retry_cnt--)
			return NULL;
		goto retry;
	}
	retry_cnt = 0xFF;

	/* Cookie: sequence number masked to the valid range; the bit index
	 * in cmdq_bitmap tracks the command as outstanding.
	 */
	cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;
	req->cookie = cpu_to_le16(cookie);
	if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW MAX outstanding cmd reached!");
		/* Undo the sequence bump and retry */
		atomic_dec(&rcfw->seq_num);
		spin_unlock_irqrestore(&cmdq->lock, flags);
		if (!retry_cnt--)
			return NULL;
		goto retry;
	}

	/* Reserve a resp buffer slot if requested */
	if (req->resp_size && crsbe) {
		spin_lock(&crsb->lock);
		sw_prod = HWQ_CMP(crsb->prod, crsb);
		crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
		*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
					  [get_crsb_idx(sw_prod)];
		bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
		req->resp_addr = cpu_to_le64(dma_addr);
		crsb->prod++;
		spin_unlock(&crsb->lock);
		/* resp_size is rewritten in cmdqe (16-byte) units for FW */
		req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
				  BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	/* Copy the command into consecutive cmdqe slots, 16 bytes at a time */
	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		pg = 0;
		idx = 0;

		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
	} while (size > 0);

	cmdq_prod = cmdq->prod;
	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
		/* The very first doorbell write is required to set this flag
		 * which prompts the FW to reset its internal pointers
		 */
		cmdq_prod |= FIRMWARE_FIRST_FLAG;
		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
	}

	/* Reserve the software completion-tracking element the CREQ
	 * handler will fill in when FW responds.
	 */
	sw_prod = HWQ_CMP(crsq->prod, crsq);
	crsqe = &crsq->crsq[sw_prod];
	memset(crsqe, 0, sizeof(*crsqe));
	crsq->prod++;
	crsqe->req_size = req->cmd_size;

	/* ring CMDQ DB */
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* Return the CREQ response pointer */
	return crsqe ? &crsqe->qp_event : NULL;
}
  201. /* Completions */
  202. static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
  203. struct creq_func_event *func_event)
  204. {
  205. switch (func_event->event) {
  206. case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
  207. break;
  208. case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
  209. break;
  210. case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
  211. break;
  212. case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
  213. break;
  214. case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
  215. break;
  216. case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
  217. break;
  218. case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
  219. break;
  220. case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
  221. /* SRQ ctx error, call srq_handler??
  222. * But there's no SRQ handle!
  223. */
  224. break;
  225. case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
  226. break;
  227. case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
  228. break;
  229. case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
  230. break;
  231. case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
  232. break;
  233. case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
  234. break;
  235. default:
  236. return -EINVAL;
  237. }
  238. return 0;
  239. }
/* Handle a CREQ "QP event": either an async QP error notification, or
 * (default case) the completion of a previously posted RCFW command.
 *
 * For a command completion: copy the event into the CRSQ tracking slot,
 * clear the cookie's bit in cmdq_bitmap (this is what marks the command
 * done), retire the cmdqe slots it consumed, and wake any sleeper unless
 * the command was posted as blocking.
 *
 * Always returns 0.
 */
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	u16 cbit, cookie, blocked = 0;
	unsigned long flags;
	u32 sw_cons;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		break;
	default:
		/* Command Response */
		spin_lock_irqsave(&cmdq->lock, flags);
		/* Completions arrive in posting order, so consume the next
		 * CRSQ tracking element.
		 */
		sw_cons = HWQ_CMP(crsq->cons, crsq);
		crsqe = &crsq->crsq[sw_cons];
		crsq->cons++;
		/* Stash the event so the original sender can read status */
		memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));

		cookie = le16_to_cpu(crsqe->qp_event.cookie);
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		/* Clearing this bit is what wait_for_resp() is waiting on */
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);

		/* Retire the cmdqe slots the completed command occupied */
		cmdq->cons += crsqe->req_size;
		spin_unlock_irqrestore(&cmdq->lock, flags);
		/* Blocking callers poll the bitmap themselves; no wakeup */
		if (!blocked)
			wake_up(&rcfw->waitq);
		break;
	}
	return 0;
}
  276. /* SP - CREQ Completion handlers */
/* SP - CREQ Completion handlers */
/* Tasklet bottom half that drains the CREQ ring.
 * @data: the struct bnxt_qplib_rcfw, cast to unsigned long by tasklet_init()
 *
 * Consumes CREQ entries until the valid-phase check fails, dispatching QP
 * events and function (async) events, then re-arms the CREQ doorbell if
 * anything was consumed.  Also called synchronously from
 * bnxt_qplib_rcfw_block_for_resp() for non-sleeping contexts.
 */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type;

	/* Service the CREQ until empty */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (1) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		/* Stop at the first entry HW has not yet marked valid */
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			if (!bnxt_qplib_process_qp_event
			    (rcfw, (struct creq_qp_event *)creqe))
				rcfw->creq_qp_event_processed++;
			else {
				dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB: type = 0x%x not handled",
					 type);
			}
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn
				(&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
				 type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: op_event = 0x%x not handled", type);
			break;
		}
		raw_cons++;
	}
	/* Advance the consumer and re-arm only if entries were consumed */
	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}
  331. static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
  332. {
  333. struct bnxt_qplib_rcfw *rcfw = dev_instance;
  334. struct bnxt_qplib_hwq *creq = &rcfw->creq;
  335. struct creq_base **creq_ptr;
  336. u32 sw_cons;
  337. /* Prefetch the CREQ element */
  338. sw_cons = HWQ_CMP(creq->cons, creq);
  339. creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
  340. prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
  341. tasklet_schedule(&rcfw->worker);
  342. return IRQ_HANDLED;
  343. }
  344. /* RCFW */
/* RCFW */
/* bnxt_qplib_deinit_rcfw() - send DEINITIALIZE_FW and, on success, clear
 * the firmware-initialized flag so later commands are rejected.
 *
 * Returns 0 on success, -EINVAL if the command could not be posted,
 * -ETIMEDOUT if FW never answered, -EFAULT if the response status/cookie
 * do not match.
 */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct creq_deinitialize_fw_resp *resp;
	struct cmdq_deinitialize_fw req;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	resp = (struct creq_deinitialize_fw_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp)
		return -EINVAL;
	/* wait_for_resp() returns 0 on timeout */
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
		return -ETIMEDOUT;
	/* FW must echo our cookie back with a clean status */
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
		return -EFAULT;
	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}
  364. static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
  365. {
  366. return (pbl->pg_size == ROCE_PG_SIZE_4K ?
  367. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
  368. pbl->pg_size == ROCE_PG_SIZE_8K ?
  369. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
  370. pbl->pg_size == ROCE_PG_SIZE_64K ?
  371. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
  372. pbl->pg_size == ROCE_PG_SIZE_2M ?
  373. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
  374. pbl->pg_size == ROCE_PG_SIZE_8M ?
  375. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
  376. pbl->pg_size == ROCE_PG_SIZE_1G ?
  377. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
  378. CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
  379. }
  380. int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
  381. struct bnxt_qplib_ctx *ctx, int is_virtfn)
  382. {
  383. struct creq_initialize_fw_resp *resp;
  384. struct cmdq_initialize_fw req;
  385. u16 cmd_flags = 0, level;
  386. RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
  387. /*
  388. * VFs need not setup the HW context area, PF
  389. * shall setup this area for VF. Skipping the
  390. * HW programming
  391. */
  392. if (is_virtfn)
  393. goto skip_ctx_setup;
  394. level = ctx->qpc_tbl.level;
  395. req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
  396. __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
  397. level = ctx->mrw_tbl.level;
  398. req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
  399. __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
  400. level = ctx->srqc_tbl.level;
  401. req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
  402. __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
  403. level = ctx->cq_tbl.level;
  404. req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
  405. __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
  406. level = ctx->srqc_tbl.level;
  407. req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
  408. __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
  409. level = ctx->cq_tbl.level;
  410. req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
  411. __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
  412. level = ctx->tim_tbl.level;
  413. req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
  414. __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
  415. level = ctx->tqm_pde_level;
  416. req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
  417. __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
  418. req.qpc_page_dir =
  419. cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
  420. req.mrw_page_dir =
  421. cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
  422. req.srq_page_dir =
  423. cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
  424. req.cq_page_dir =
  425. cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
  426. req.tim_page_dir =
  427. cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
  428. req.tqm_page_dir =
  429. cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
  430. req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
  431. req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
  432. req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
  433. req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
  434. req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
  435. req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
  436. req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
  437. req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
  438. req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
  439. skip_ctx_setup:
  440. req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
  441. resp = (struct creq_initialize_fw_resp *)
  442. bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  443. NULL, 0);
  444. if (!resp) {
  445. dev_err(&rcfw->pdev->dev,
  446. "QPLIB: RCFW: INITIALIZE_FW send failed");
  447. return -EINVAL;
  448. }
  449. if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
  450. /* Cmd timed out */
  451. dev_err(&rcfw->pdev->dev,
  452. "QPLIB: RCFW: INITIALIZE_FW timed out");
  453. return -ETIMEDOUT;
  454. }
  455. if (resp->status ||
  456. le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
  457. dev_err(&rcfw->pdev->dev,
  458. "QPLIB: RCFW: INITIALIZE_FW failed");
  459. return -EINVAL;
  460. }
  461. set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
  462. return 0;
  463. }
  464. void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
  465. {
  466. bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
  467. kfree(rcfw->crsq.crsq);
  468. bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
  469. bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
  470. rcfw->pdev = NULL;
  471. }
/* bnxt_qplib_alloc_rcfw_channel() - allocate the FW communication queues:
 * CREQ (completions from FW), CMDQ (commands to FW), CRSQ (per-command
 * software tracking elements) and CRSB (response buffers).
 *
 * Returns 0 on success, -ENOMEM on any failure; everything allocated so
 * far is released via bnxt_qplib_free_rcfw_channel() before returning.
 */
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}
	/* One CRSQ tracking element per possible CMDQ entry */
	rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
	rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
				  sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
	if (!rcfw->crsq.crsq)
		goto fail;
	rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
				      &rcfw->crsb.max_elements,
				      BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CRSB allocation failed");
		goto fail;
	}
	return 0;
fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}
  513. void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
  514. {
  515. unsigned long indx;
  516. /* Make sure the HW channel is stopped! */
  517. synchronize_irq(rcfw->vector);
  518. tasklet_disable(&rcfw->worker);
  519. tasklet_kill(&rcfw->worker);
  520. if (rcfw->requested) {
  521. free_irq(rcfw->vector, rcfw);
  522. rcfw->requested = false;
  523. }
  524. if (rcfw->cmdq_bar_reg_iomem)
  525. iounmap(rcfw->cmdq_bar_reg_iomem);
  526. rcfw->cmdq_bar_reg_iomem = NULL;
  527. if (rcfw->creq_bar_reg_iomem)
  528. iounmap(rcfw->creq_bar_reg_iomem);
  529. rcfw->creq_bar_reg_iomem = NULL;
  530. indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
  531. if (indx != rcfw->bmap_size)
  532. dev_err(&rcfw->pdev->dev,
  533. "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
  534. kfree(rcfw->cmdq_bitmap);
  535. rcfw->bmap_size = 0;
  536. rcfw->aeq_handler = NULL;
  537. rcfw->vector = 0;
  538. }
  539. int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
  540. struct bnxt_qplib_rcfw *rcfw,
  541. int msix_vector,
  542. int cp_bar_reg_off, int virt_fn,
  543. int (*aeq_handler)(struct bnxt_qplib_rcfw *,
  544. struct creq_func_event *))
  545. {
  546. resource_size_t res_base;
  547. struct cmdq_init init;
  548. u16 bmap_size;
  549. int rc;
  550. /* General */
  551. atomic_set(&rcfw->seq_num, 0);
  552. rcfw->flags = FIRMWARE_FIRST_FLAG;
  553. bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
  554. sizeof(unsigned long));
  555. rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
  556. if (!rcfw->cmdq_bitmap)
  557. return -ENOMEM;
  558. rcfw->bmap_size = bmap_size;
  559. /* CMDQ */
  560. rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
  561. res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
  562. if (!res_base)
  563. return -ENOMEM;
  564. rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
  565. RCFW_COMM_BASE_OFFSET,
  566. RCFW_COMM_SIZE);
  567. if (!rcfw->cmdq_bar_reg_iomem) {
  568. dev_err(&rcfw->pdev->dev,
  569. "QPLIB: CMDQ BAR region %d mapping failed",
  570. rcfw->cmdq_bar_reg);
  571. return -ENOMEM;
  572. }
  573. rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
  574. RCFW_PF_COMM_PROD_OFFSET;
  575. rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
  576. /* CRSQ */
  577. rcfw->crsq.prod = 0;
  578. rcfw->crsq.cons = 0;
  579. /* CREQ */
  580. rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
  581. res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
  582. if (!res_base)
  583. dev_err(&rcfw->pdev->dev,
  584. "QPLIB: CREQ BAR region %d resc start is 0!",
  585. rcfw->creq_bar_reg);
  586. rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
  587. 4);
  588. if (!rcfw->creq_bar_reg_iomem) {
  589. dev_err(&rcfw->pdev->dev,
  590. "QPLIB: CREQ BAR region %d mapping failed",
  591. rcfw->creq_bar_reg);
  592. return -ENOMEM;
  593. }
  594. rcfw->creq_qp_event_processed = 0;
  595. rcfw->creq_func_event_processed = 0;
  596. rcfw->vector = msix_vector;
  597. if (aeq_handler)
  598. rcfw->aeq_handler = aeq_handler;
  599. tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
  600. (unsigned long)rcfw);
  601. rcfw->requested = false;
  602. rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
  603. "bnxt_qplib_creq", rcfw);
  604. if (rc) {
  605. dev_err(&rcfw->pdev->dev,
  606. "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
  607. bnxt_qplib_disable_rcfw_channel(rcfw);
  608. return rc;
  609. }
  610. rcfw->requested = true;
  611. init_waitqueue_head(&rcfw->waitq);
  612. CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
  613. init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
  614. init.cmdq_size_cmdq_lvl = cpu_to_le16(
  615. ((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
  616. CMDQ_INIT_CMDQ_SIZE_MASK) |
  617. ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
  618. CMDQ_INIT_CMDQ_LVL_MASK));
  619. init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
  620. /* Write to the Bono mailbox register */
  621. __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
  622. return 0;
  623. }