qplib_rcfw.c

/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
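
/*
 * Sleep on the RCFW wait queue until the CREQ handler clears this
 * command's bit in cmdq_bitmap, i.e. until the firmware response for
 * @cookie has been copied back to the caller. Returns 0 on completion,
 * -ETIMEDOUT after RCFW_CMD_WAIT_TIME_MS.
 */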
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
        u16 cbit;
        int rc;

        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        rc = wait_event_timeout(rcfw->waitq,
                                !test_bit(cbit, rcfw->cmdq_bitmap),
                                msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
        return rc ? 0 : -ETIMEDOUT;
}
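
/*
 * Polling variant of __wait_for_resp() for callers that cannot sleep:
 * busy-waits in 1 msec steps (up to RCFW_BLOCKED_CMD_WAIT_COUNT) and
 * drives the CREQ service routine directly instead of waiting for the
 * tasklet to run.
 */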
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
        u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
        u16 cbit;

        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        if (!test_bit(cbit, rcfw->cmdq_bitmap))
                goto done;
        do {
                mdelay(1); /* 1 msec */
                bnxt_qplib_service_creq((unsigned long)rcfw);
        } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
        return count ? 0 : -ETIMEDOUT;
}
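
/*
 * Stage one request into the CMDQ. A cookie derived from rcfw->seq_num
 * tags the request: its bit in cmdq_bitmap is set and crsqe_tbl[cbit]
 * records the caller's response buffer so the CREQ handler can complete
 * it. The request is copied into the queue in 16-byte cmdqe units and
 * the CMDQ doorbell is rung; the caller must still wait (or poll) for
 * the response to arrive via the CREQ.
 */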
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
                          struct creq_base *resp, void *sb, u8 is_block)
{
        struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
        struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
        struct bnxt_qplib_crsq *crsqe;
        u32 sw_prod, cmdq_prod;
        unsigned long flags;
        u32 size, opcode;
        u16 cookie, cbit;
        u8 *preq;

        opcode = req->opcode;
        if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
            (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
             opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: RCFW not initialized, reject opcode 0x%x",
                        opcode);
                return -EINVAL;
        }

        if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
            opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
                dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
                return -EINVAL;
        }

        if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
                return -ETIMEDOUT;

        /* Cmdq are in 16-byte units, each request can consume 1 or more
         * cmdqe
         */
        spin_lock_irqsave(&cmdq->lock, flags);
        if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
                dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
                spin_unlock_irqrestore(&cmdq->lock, flags);
                return -EAGAIN;
        }

        cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        if (is_block)
                cookie |= RCFW_CMD_IS_BLOCKING;

        set_bit(cbit, rcfw->cmdq_bitmap);
        req->cookie = cpu_to_le16(cookie);
        crsqe = &rcfw->crsqe_tbl[cbit];
        if (crsqe->resp) {
                spin_unlock_irqrestore(&cmdq->lock, flags);
                return -EBUSY;
        }
        memset(resp, 0, sizeof(*resp));
        crsqe->resp = (struct creq_qp_event *)resp;
        crsqe->resp->cookie = req->cookie;
        crsqe->req_size = req->cmd_size;
        if (req->resp_size && sb) {
                struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

                req->resp_addr = cpu_to_le64(sbuf->dma_addr);
                req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
                                 BNXT_QPLIB_CMDQE_UNITS;
        }

        cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
        preq = (u8 *)req;
        size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
        do {
                /* Locate the next cmdq slot */
                sw_prod = HWQ_CMP(cmdq->prod, cmdq);
                cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
                if (!cmdqe) {
                        dev_err(&rcfw->pdev->dev,
                                "QPLIB: RCFW request failed with no cmdqe!");
                        goto done;
                }
                /* Copy a segment of the req cmd to the cmdq */
                memset(cmdqe, 0, sizeof(*cmdqe));
                memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
                preq += min_t(u32, size, sizeof(*cmdqe));
                size -= min_t(u32, size, sizeof(*cmdqe));
                cmdq->prod++;
                rcfw->seq_num++;
        } while (size > 0);

        rcfw->seq_num++;

        cmdq_prod = cmdq->prod;
        if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
                /* The very first doorbell write
                 * is required to set this flag
                 * which prompts the FW to reset
                 * its internal pointers
                 */
                cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
                clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
        }

        /* ring CMDQ DB */
        wmb();
        writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
               rcfw->cmdq_bar_reg_prod_off);
        writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
               rcfw->cmdq_bar_reg_trig_off);
done:
        spin_unlock_irqrestore(&cmdq->lock, flags);
        /* Return the CREQ response pointer */
        return 0;
}
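
/*
 * Send a command to the firmware and wait for its response. Transient
 * -EAGAIN (CMDQ full) and -EBUSY (slot still in use) failures are
 * retried; the caller then either polls (is_block) or sleeps for the
 * completion, and a non-zero event status is reported as -EFAULT.
 */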
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
                                 struct cmdq_base *req,
                                 struct creq_base *resp,
                                 void *sb, u8 is_block)
{
        struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
        u16 cookie;
        u8 opcode, retry_cnt = 0xFF;
        int rc = 0;

        do {
                opcode = req->opcode;
                rc = __send_message(rcfw, req, resp, sb, is_block);
                cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
                if (!rc)
                        break;

                if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
                        /* send failed */
                        dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
                                cookie, opcode);
                        return rc;
                }
                is_block ? mdelay(1) : usleep_range(500, 1000);

        } while (retry_cnt--);

        if (is_block)
                rc = __block_for_resp(rcfw, cookie);
        else
                rc = __wait_for_resp(rcfw, cookie);
        if (rc) {
                /* timed out */
                dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d) msec",
                        cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
                set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
                return rc;
        }

        if (evnt->status) {
                /* failed with status */
                dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
                        cookie, opcode, evnt->status);
                rc = -EFAULT;
        }

        return rc;
}

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
                                         struct creq_func_event *func_event)
{
        switch (func_event->event) {
        case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
                /* SRQ ctx error, call srq_handler??
                 * But there's no SRQ handle!
                 */
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
                break;
        case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
                break;
        case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
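
/*
 * Handle a QP-event CREQE. QP error notifications mark the affected QP
 * as errored under its CQ locks; every other event is treated as a
 * command response and matched back to the waiting command slot via the
 * cookie carried in the event.
 */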
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
                                       struct creq_qp_event *qp_event)
{
        struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
        struct creq_qp_error_notification *err_event;
        struct bnxt_qplib_crsq *crsqe;
        unsigned long flags;
        struct bnxt_qplib_qp *qp;
        u16 cbit, blocked = 0;
        u16 cookie;
        __le16 mcookie;
        u32 qp_id;

        switch (qp_event->event) {
        case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
                err_event = (struct creq_qp_error_notification *)qp_event;
                qp_id = le32_to_cpu(err_event->xid);
                qp = rcfw->qp_tbl[qp_id].qp_handle;
                dev_dbg(&rcfw->pdev->dev,
                        "QPLIB: Received QP error notification");
                dev_dbg(&rcfw->pdev->dev,
                        "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
                        qp_id, err_event->req_err_state_reason,
                        err_event->res_err_state_reason);
                if (!qp)
                        break;
                bnxt_qplib_acquire_cq_locks(qp, &flags);
                bnxt_qplib_mark_qp_error(qp);
                bnxt_qplib_release_cq_locks(qp, &flags);
                break;
        default:
                /* Command Response */
                spin_lock_irqsave(&cmdq->lock, flags);
                cookie = le16_to_cpu(qp_event->cookie);
                mcookie = qp_event->cookie;
                blocked = cookie & RCFW_CMD_IS_BLOCKING;
                cookie &= RCFW_MAX_COOKIE_VALUE;
                cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
                crsqe = &rcfw->crsqe_tbl[cbit];
                if (crsqe->resp &&
                    crsqe->resp->cookie == mcookie) {
                        memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
                        crsqe->resp = NULL;
                } else {
                        dev_err(&rcfw->pdev->dev,
                                "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
                                crsqe->resp ? "mismatch" : "collision",
                                crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
                }
                if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
                        dev_warn(&rcfw->pdev->dev,
                                 "QPLIB: CMD bit %d was not requested", cbit);
                cmdq->cons += crsqe->req_size;
                crsqe->req_size = 0;

                if (!blocked)
                        wake_up(&rcfw->waitq);
                spin_unlock_irqrestore(&cmdq->lock, flags);
        }
        return 0;
}

/* SP - CREQ Completion handlers */
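
/*
 * Tasklet body: drain up to CREQ_ENTRY_POLL_BUDGET entries from the
 * CREQ, dispatching each to the QP-event or function-event handler,
 * then advance the consumer index and re-arm the CREQ doorbell.
 */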
static void bnxt_qplib_service_creq(unsigned long data)
{
        struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
        struct bnxt_qplib_hwq *creq = &rcfw->creq;
        struct creq_base *creqe, **creq_ptr;
        u32 sw_cons, raw_cons;
        unsigned long flags;
        u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

        /* Service the CREQ until budget is over */
        spin_lock_irqsave(&creq->lock, flags);
        raw_cons = creq->cons;
        while (budget > 0) {
                sw_cons = HWQ_CMP(raw_cons, creq);
                creq_ptr = (struct creq_base **)creq->pbl_ptr;
                creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
                if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
                        break;
                /* The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();

                type = creqe->type & CREQ_BASE_TYPE_MASK;
                switch (type) {
                case CREQ_BASE_TYPE_QP_EVENT:
                        bnxt_qplib_process_qp_event
                                (rcfw, (struct creq_qp_event *)creqe);
                        rcfw->creq_qp_event_processed++;
                        break;
                case CREQ_BASE_TYPE_FUNC_EVENT:
                        if (!bnxt_qplib_process_func_event
                            (rcfw, (struct creq_func_event *)creqe))
                                rcfw->creq_func_event_processed++;
                        else
                                dev_warn
                                (&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
                                 type);
                        break;
                default:
                        dev_warn(&rcfw->pdev->dev,
                                 "QPLIB: creqe with op_event = 0x%x not handled",
                                 type);
                        break;
                }
                raw_cons++;
                budget--;
        }

        if (creq->cons != raw_cons) {
                creq->cons = raw_cons;
                CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
                              creq->max_elements);
        }
        spin_unlock_irqrestore(&creq->lock, flags);
}
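
/*
 * MSI-X handler for the CREQ: prefetch the next entry to warm the
 * cache, then defer the actual processing to the tasklet.
 */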
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
        struct bnxt_qplib_rcfw *rcfw = dev_instance;
        struct bnxt_qplib_hwq *creq = &rcfw->creq;
        struct creq_base **creq_ptr;
        u32 sw_cons;

        /* Prefetch the CREQ element */
        sw_cons = HWQ_CMP(creq->cons, creq);
        creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
        prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
        tasklet_schedule(&rcfw->worker);

        return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
        struct cmdq_deinitialize_fw req;
        struct creq_deinitialize_fw_resp resp;
        u16 cmd_flags = 0;
        int rc;

        RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          NULL, 0);
        if (rc)
                return rc;

        clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
        return 0;
}
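
/*
 * Translate a PBL page size into the firmware's INITIALIZE_FW
 * page-size encoding, falling back to 4K for unrecognized sizes.
 */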
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
        return (pbl->pg_size == ROCE_PG_SIZE_4K ?
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
                pbl->pg_size == ROCE_PG_SIZE_8K ?
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
                pbl->pg_size == ROCE_PG_SIZE_64K ?
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
                pbl->pg_size == ROCE_PG_SIZE_2M ?
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
                pbl->pg_size == ROCE_PG_SIZE_8M ?
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
                pbl->pg_size == ROCE_PG_SIZE_1G ?
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
                                CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}
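
/*
 * Send INITIALIZE_FW, describing the page-backed context tables (QPC,
 * MRW, SRQ, CQ, TIM, TQM) to the firmware. VFs skip the context setup
 * because the PF programs this area on their behalf.
 */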
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
                         struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
        struct cmdq_initialize_fw req;
        struct creq_initialize_fw_resp resp;
        u16 cmd_flags = 0, level;
        int rc;

        RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);

        /*
         * VFs need not setup the HW context area, PF
         * shall setup this area for VF. Skipping the
         * HW programming
         */
        if (is_virtfn)
                goto skip_ctx_setup;

        level = ctx->qpc_tbl.level;
        req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
        level = ctx->mrw_tbl.level;
        req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
        level = ctx->srqc_tbl.level;
        req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
        level = ctx->cq_tbl.level;
        req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
        level = ctx->tim_tbl.level;
        req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
        level = ctx->tqm_pde_level;
        req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
                                __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

        req.qpc_page_dir =
                cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.mrw_page_dir =
                cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.srq_page_dir =
                cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.cq_page_dir =
                cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.tim_page_dir =
                cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
        req.tqm_page_dir =
                cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

        req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
        req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
        req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
        req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

        req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
        req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
        req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
        req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
        req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
        req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          NULL, 0);
        if (rc)
                return rc;
        set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
        return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
        kfree(rcfw->qp_tbl);
        kfree(rcfw->crsqe_tbl);
        bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
        bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
        rcfw->pdev = NULL;
}
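
/*
 * Allocate the CREQ and CMDQ hardware queues plus the per-command
 * response table (crsqe_tbl) and the QP lookup table used by the error
 * notification path. On any failure everything allocated so far is
 * released via bnxt_qplib_free_rcfw_channel().
 */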
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
                                  struct bnxt_qplib_rcfw *rcfw,
                                  int qp_tbl_sz)
{
        rcfw->pdev = pdev;
        rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
        if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
                                      &rcfw->creq.max_elements,
                                      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
                                      HWQ_TYPE_L2_CMPL)) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: HW channel CREQ allocation failed");
                goto fail;
        }
        rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
        if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
                                      &rcfw->cmdq.max_elements,
                                      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
                                      HWQ_TYPE_CTX)) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: HW channel CMDQ allocation failed");
                goto fail;
        }

        rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
                                  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
        if (!rcfw->crsqe_tbl)
                goto fail;

        rcfw->qp_tbl_size = qp_tbl_sz;
        rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
                               GFP_KERNEL);
        if (!rcfw->qp_tbl)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_rcfw_channel(rcfw);
        return -ENOMEM;
}
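
/*
 * Quiesce and tear down the HW channel: stop the IRQ and tasklet,
 * release the BAR mappings, and complain if any command bit is still
 * set in cmdq_bitmap (i.e. a command never received its response).
 */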
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
        unsigned long indx;

        /* Make sure the HW channel is stopped! */
        synchronize_irq(rcfw->vector);
        tasklet_disable(&rcfw->worker);
        tasklet_kill(&rcfw->worker);

        if (rcfw->requested) {
                free_irq(rcfw->vector, rcfw);
                rcfw->requested = false;
        }
        if (rcfw->cmdq_bar_reg_iomem)
                iounmap(rcfw->cmdq_bar_reg_iomem);
        rcfw->cmdq_bar_reg_iomem = NULL;

        if (rcfw->creq_bar_reg_iomem)
                iounmap(rcfw->creq_bar_reg_iomem);
        rcfw->creq_bar_reg_iomem = NULL;

        indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
        if (indx != rcfw->bmap_size)
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
        kfree(rcfw->cmdq_bitmap);
        rcfw->bmap_size = 0;

        rcfw->aeq_handler = NULL;
        rcfw->vector = 0;
}
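
/*
 * Bring up the HW channel: map the CMDQ and CREQ doorbell regions,
 * hook up the CREQ MSI-X vector and tasklet, arm the CREQ doorbell,
 * and finally write the CMDQ init message to the firmware mailbox.
 */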
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
                                   struct bnxt_qplib_rcfw *rcfw,
                                   int msix_vector,
                                   int cp_bar_reg_off, int virt_fn,
                                   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
                                                      struct creq_func_event *))
{
        resource_size_t res_base;
        struct cmdq_init init;
        u16 bmap_size;
        int rc;

        /* General */
        rcfw->seq_num = 0;
        set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
        bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
                                  sizeof(unsigned long));
        rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
        if (!rcfw->cmdq_bitmap)
                return -ENOMEM;
        rcfw->bmap_size = bmap_size;

        /* CMDQ */
        rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
        res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
        if (!res_base)
                return -ENOMEM;

        rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
                                                   RCFW_COMM_BASE_OFFSET,
                                                   RCFW_COMM_SIZE);
        if (!rcfw->cmdq_bar_reg_iomem) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: CMDQ BAR region %d mapping failed",
                        rcfw->cmdq_bar_reg);
                return -ENOMEM;
        }

        rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
                                        RCFW_PF_COMM_PROD_OFFSET;
        rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

        /* CREQ */
        rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
        res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
        if (!res_base)
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: CREQ BAR region %d resc start is 0!",
                        rcfw->creq_bar_reg);
        rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
                                                   4);
        if (!rcfw->creq_bar_reg_iomem) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: CREQ BAR region %d mapping failed",
                        rcfw->creq_bar_reg);
                return -ENOMEM;
        }
        rcfw->creq_qp_event_processed = 0;
        rcfw->creq_func_event_processed = 0;

        rcfw->vector = msix_vector;
        if (aeq_handler)
                rcfw->aeq_handler = aeq_handler;

        tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
                     (unsigned long)rcfw);

        rcfw->requested = false;
        rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
                         "bnxt_qplib_creq", rcfw);
        if (rc) {
                dev_err(&rcfw->pdev->dev,
                        "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
                bnxt_qplib_disable_rcfw_channel(rcfw);
                return rc;
        }
        rcfw->requested = true;

        init_waitqueue_head(&rcfw->waitq);

        CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

        init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
        init.cmdq_size_cmdq_lvl = cpu_to_le16(
                ((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
                 CMDQ_INIT_CMDQ_SIZE_MASK) |
                ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
                 CMDQ_INIT_CMDQ_LVL_MASK));
        init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

        /* Write to the Bono mailbox register */
        __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
        return 0;
}
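
/*
 * Allocate a DMA-coherent side buffer for commands whose output does
 * not fit in a CREQE; __send_message() passes its address and size to
 * the firmware via resp_addr/resp_size. GFP_ATOMIC is used so this can
 * be called from atomic context.
 */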
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
                                struct bnxt_qplib_rcfw *rcfw,
                                u32 size)
{
        struct bnxt_qplib_rcfw_sbuf *sbuf;

        sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
        if (!sbuf)
                return NULL;

        sbuf->size = size;
        sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
                                       &sbuf->dma_addr, GFP_ATOMIC);
        if (!sbuf->sb)
                goto bail;

        return sbuf;
bail:
        kfree(sbuf);
        return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
                               struct bnxt_qplib_rcfw_sbuf *sbuf)
{
        if (sbuf->sb)
                dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
                                  sbuf->sb, sbuf->dma_addr);
        kfree(sbuf);
}