qplib_sp.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731
  1. /*
  2. * Broadcom NetXtreme-E RoCE driver.
  3. *
  4. * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
  5. * Broadcom refers to Broadcom Limited and/or its subsidiaries.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. *
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33. * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34. * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35. *
  36. * Description: Slow Path Operators
  37. */
  38. #include <linux/interrupt.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/sched.h>
  41. #include <linux/pci.h>
  42. #include "roce_hsi.h"
  43. #include "qplib_res.h"
  44. #include "qplib_rcfw.h"
  45. #include "qplib_sp.h"
/* All-zero GID used as the "empty slot" sentinel in the SGID table. */
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
						     0, 0, 0, 0, 0, 0, 0, 0 } };
  48. /* Device */
  49. static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
  50. {
  51. int rc;
  52. u16 pcie_ctl2;
  53. rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
  54. &pcie_ctl2);
  55. if (rc)
  56. return false;
  57. return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
  58. }
/*
 * Query device attributes from firmware (QUERY_FUNC) and translate the
 * little-endian response side-buffer into the host-order @attr structure.
 *
 * Returns 0 on success, -ENOMEM if the side buffer cannot be allocated,
 * or the firmware command status.
 */
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
			    struct bnxt_qplib_dev_attr *attr)
{
	struct cmdq_query_func req;
	struct creq_query_func_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_func_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp;
	u8 *tqm_alloc;
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

	/* Firmware writes the attributes into a DMA side buffer. */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: SP: QUERY_FUNC alloc side buffer failed");
		return -ENOMEM;
	}

	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;

	/* Extract the context from the side buffer */
	attr->max_qp = le32_to_cpu(sb->max_qp);
	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
	attr->max_qp += 1;
	/* Clamp firmware's read/atomic depths to the driver's supported max. */
	attr->max_qp_rd_atom =
		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
	attr->max_qp_init_rd_atom =
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
	/*
	 * 128 WQEs needs to be reserved for the HW (8916). Prevent
	 * reporting the max number
	 */
	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
	attr->max_qp_sges = sb->max_sge;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
	attr->max_cq_sges = attr->max_qp_sges;
	attr->max_mr = le32_to_cpu(sb->max_mr);
	attr->max_mw = le32_to_cpu(sb->max_mw);
	attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
	attr->max_pd = 64 * 1024;
	attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
	attr->max_ah = le32_to_cpu(sb->max_ah);
	attr->max_fmr = le32_to_cpu(sb->max_fmr);
	attr->max_map_per_fmr = sb->max_map_per_fmr;
	attr->max_srq = le16_to_cpu(sb->max_srq);
	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
	attr->max_srq_sges = sb->max_srq_sge;
	/* Bono only reports 1 PKEY for now, but it can support > 1 */
	attr->max_pkey = le32_to_cpu(sb->max_pkeys);
	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
	attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
	attr->max_sgid = le32_to_cpu(sb->max_gid);
	/* NOTE(review): firmware version is hard-coded here, not queried
	 * from the device — confirm this matches the supported FW.
	 */
	strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));

	/* Unpack four u8 TQM allocation requests from each LE 32-bit word. */
	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
		tqm_alloc = (u8 *)&temp;
		attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
		attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}
  133. /* SGID */
  134. int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
  135. struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
  136. struct bnxt_qplib_gid *gid)
  137. {
  138. if (index > sgid_tbl->max) {
  139. dev_err(&res->pdev->dev,
  140. "QPLIB: Index %d exceeded SGID table max (%d)",
  141. index, sgid_tbl->max);
  142. return -EINVAL;
  143. }
  144. memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
  145. return 0;
  146. }
  147. int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
  148. struct bnxt_qplib_gid *gid, bool update)
  149. {
  150. struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
  151. struct bnxt_qplib_res,
  152. sgid_tbl);
  153. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  154. int index;
  155. if (!sgid_tbl) {
  156. dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
  157. return -EINVAL;
  158. }
  159. /* Do we need a sgid_lock here? */
  160. if (!sgid_tbl->active) {
  161. dev_err(&res->pdev->dev,
  162. "QPLIB: SGID table has no active entries");
  163. return -ENOMEM;
  164. }
  165. for (index = 0; index < sgid_tbl->max; index++) {
  166. if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
  167. break;
  168. }
  169. if (index == sgid_tbl->max) {
  170. dev_warn(&res->pdev->dev, "GID not found in the SGID table");
  171. return 0;
  172. }
  173. /* Remove GID from the SGID table */
  174. if (update) {
  175. struct cmdq_delete_gid req;
  176. struct creq_delete_gid_resp resp;
  177. u16 cmd_flags = 0;
  178. int rc;
  179. RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
  180. if (sgid_tbl->hw_id[index] == 0xFFFF) {
  181. dev_err(&res->pdev->dev,
  182. "QPLIB: GID entry contains an invalid HW id");
  183. return -EINVAL;
  184. }
  185. req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
  186. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  187. (void *)&resp, NULL, 0);
  188. if (rc)
  189. return rc;
  190. }
  191. memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
  192. sizeof(bnxt_qplib_gid_zero));
  193. sgid_tbl->vlan[index] = 0;
  194. sgid_tbl->active--;
  195. dev_dbg(&res->pdev->dev,
  196. "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
  197. index, sgid_tbl->hw_id[index], sgid_tbl->active);
  198. sgid_tbl->hw_id[index] = (u16)-1;
  199. /* unlock */
  200. return 0;
  201. }
  202. int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
  203. struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
  204. bool update, u32 *index)
  205. {
  206. struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
  207. struct bnxt_qplib_res,
  208. sgid_tbl);
  209. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  210. int i, free_idx;
  211. if (!sgid_tbl) {
  212. dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
  213. return -EINVAL;
  214. }
  215. /* Do we need a sgid_lock here? */
  216. if (sgid_tbl->active == sgid_tbl->max) {
  217. dev_err(&res->pdev->dev, "QPLIB: SGID table is full");
  218. return -ENOMEM;
  219. }
  220. free_idx = sgid_tbl->max;
  221. for (i = 0; i < sgid_tbl->max; i++) {
  222. if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
  223. dev_dbg(&res->pdev->dev,
  224. "QPLIB: SGID entry already exist in entry %d!",
  225. i);
  226. *index = i;
  227. return -EALREADY;
  228. } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
  229. sizeof(bnxt_qplib_gid_zero)) &&
  230. free_idx == sgid_tbl->max) {
  231. free_idx = i;
  232. }
  233. }
  234. if (free_idx == sgid_tbl->max) {
  235. dev_err(&res->pdev->dev,
  236. "QPLIB: SGID table is FULL but count is not MAX??");
  237. return -ENOMEM;
  238. }
  239. if (update) {
  240. struct cmdq_add_gid req;
  241. struct creq_add_gid_resp resp;
  242. u16 cmd_flags = 0;
  243. int rc;
  244. RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
  245. req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
  246. req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
  247. req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
  248. req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
  249. /*
  250. * driver should ensure that all RoCE traffic is always VLAN
  251. * tagged if RoCE traffic is running on non-zero VLAN ID or
  252. * RoCE traffic is running on non-zero Priority.
  253. */
  254. if ((vlan_id != 0xFFFF) || res->prio) {
  255. if (vlan_id != 0xFFFF)
  256. req.vlan = cpu_to_le16
  257. (vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
  258. req.vlan |= cpu_to_le16
  259. (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
  260. CMDQ_ADD_GID_VLAN_VLAN_EN);
  261. }
  262. /* MAC in network format */
  263. req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
  264. req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
  265. req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
  266. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  267. (void *)&resp, NULL, 0);
  268. if (rc)
  269. return rc;
  270. sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
  271. }
  272. /* Add GID to the sgid_tbl */
  273. memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
  274. sgid_tbl->active++;
  275. if (vlan_id != 0xFFFF)
  276. sgid_tbl->vlan[free_idx] = 1;
  277. dev_dbg(&res->pdev->dev,
  278. "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
  279. free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
  280. *index = free_idx;
  281. /* unlock */
  282. return 0;
  283. }
  284. int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
  285. struct bnxt_qplib_gid *gid, u16 gid_idx,
  286. u8 *smac)
  287. {
  288. struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
  289. struct bnxt_qplib_res,
  290. sgid_tbl);
  291. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  292. struct creq_modify_gid_resp resp;
  293. struct cmdq_modify_gid req;
  294. int rc;
  295. u16 cmd_flags = 0;
  296. RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
  297. req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
  298. req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
  299. req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
  300. req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
  301. if (res->prio) {
  302. req.vlan |= cpu_to_le16
  303. (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
  304. CMDQ_ADD_GID_VLAN_VLAN_EN);
  305. }
  306. /* MAC in network format */
  307. req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
  308. req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
  309. req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
  310. req.gid_index = cpu_to_le16(gid_idx);
  311. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  312. (void *)&resp, NULL, 0);
  313. return rc;
  314. }
  315. /* pkeys */
  316. int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
  317. struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
  318. u16 *pkey)
  319. {
  320. if (index == 0xFFFF) {
  321. *pkey = 0xFFFF;
  322. return 0;
  323. }
  324. if (index > pkey_tbl->max) {
  325. dev_err(&res->pdev->dev,
  326. "QPLIB: Index %d exceeded PKEY table max (%d)",
  327. index, pkey_tbl->max);
  328. return -EINVAL;
  329. }
  330. memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
  331. return 0;
  332. }
  333. int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
  334. struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
  335. bool update)
  336. {
  337. int i, rc = 0;
  338. if (!pkey_tbl) {
  339. dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
  340. return -EINVAL;
  341. }
  342. /* Do we need a pkey_lock here? */
  343. if (!pkey_tbl->active) {
  344. dev_err(&res->pdev->dev,
  345. "QPLIB: PKEY table has no active entries");
  346. return -ENOMEM;
  347. }
  348. for (i = 0; i < pkey_tbl->max; i++) {
  349. if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
  350. break;
  351. }
  352. if (i == pkey_tbl->max) {
  353. dev_err(&res->pdev->dev,
  354. "QPLIB: PKEY 0x%04x not found in the pkey table",
  355. *pkey);
  356. return -ENOMEM;
  357. }
  358. memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
  359. pkey_tbl->active--;
  360. /* unlock */
  361. return rc;
  362. }
  363. int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
  364. struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
  365. bool update)
  366. {
  367. int i, free_idx, rc = 0;
  368. if (!pkey_tbl) {
  369. dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
  370. return -EINVAL;
  371. }
  372. /* Do we need a pkey_lock here? */
  373. if (pkey_tbl->active == pkey_tbl->max) {
  374. dev_err(&res->pdev->dev, "QPLIB: PKEY table is full");
  375. return -ENOMEM;
  376. }
  377. free_idx = pkey_tbl->max;
  378. for (i = 0; i < pkey_tbl->max; i++) {
  379. if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
  380. return -EALREADY;
  381. else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
  382. free_idx = i;
  383. }
  384. if (free_idx == pkey_tbl->max) {
  385. dev_err(&res->pdev->dev,
  386. "QPLIB: PKEY table is FULL but count is not MAX??");
  387. return -ENOMEM;
  388. }
  389. /* Add PKEY to the pkey_tbl */
  390. memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
  391. pkey_tbl->active++;
  392. /* unlock */
  393. return rc;
  394. }
  395. /* AH */
/*
 * Create an address handle in the device via the CREATE_AH firmware
 * command and store the returned AH id in ah->id.
 *
 * Returns 0 on success or the firmware command status.
 */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_ah req;
	struct creq_create_ah_resp resp;
	u16 cmd_flags = 0;
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);

	/* Stage the 16-byte destination GID as four LE 32-bit words. */
	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
	req.dgid[0] = cpu_to_le32(temp32[0]);
	req.dgid[1] = cpu_to_le32(temp32[1]);
	req.dgid[2] = cpu_to_le32(temp32[2]);
	req.dgid[3] = cpu_to_le32(temp32[3]);

	req.type = ah->nw_type;
	req.hop_limit = ah->hop_limit;
	/* Translate the driver's SGID index into the firmware's HW id. */
	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
					CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
					CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
	req.pd_id = cpu_to_le32(ah->pd->id);
	req.traffic_class = ah->traffic_class;

	/* MAC in network format */
	memcpy(temp16, ah->dmac, 6);
	req.dest_mac[0] = cpu_to_le16(temp16[0]);
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	/* Final arg 1: block until the command completes. */
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 1);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	return 0;
}
  431. int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
  432. {
  433. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  434. struct cmdq_destroy_ah req;
  435. struct creq_destroy_ah_resp resp;
  436. u16 cmd_flags = 0;
  437. int rc;
  438. /* Clean up the AH table in the device */
  439. RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
  440. req.ah_cid = cpu_to_le32(ah->id);
  441. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
  442. NULL, 1);
  443. if (rc)
  444. return rc;
  445. return 0;
  446. }
  447. /* MRW */
  448. int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
  449. {
  450. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  451. struct cmdq_deallocate_key req;
  452. struct creq_deallocate_key_resp resp;
  453. u16 cmd_flags = 0;
  454. int rc;
  455. if (mrw->lkey == 0xFFFFFFFF) {
  456. dev_info(&res->pdev->dev,
  457. "QPLIB: SP: Free a reserved lkey MRW");
  458. return 0;
  459. }
  460. RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
  461. req.mrw_flags = mrw->type;
  462. if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
  463. (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
  464. (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
  465. req.key = cpu_to_le32(mrw->rkey);
  466. else
  467. req.key = cpu_to_le32(mrw->lkey);
  468. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
  469. NULL, 0);
  470. if (rc)
  471. return rc;
  472. /* Free the qplib's MRW memory */
  473. if (mrw->hwq.max_elements)
  474. bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
  475. return 0;
  476. }
  477. int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
  478. {
  479. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  480. struct cmdq_allocate_mrw req;
  481. struct creq_allocate_mrw_resp resp;
  482. u16 cmd_flags = 0;
  483. unsigned long tmp;
  484. int rc;
  485. RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
  486. req.pd_id = cpu_to_le32(mrw->pd->id);
  487. req.mrw_flags = mrw->type;
  488. if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
  489. mrw->flags & BNXT_QPLIB_FR_PMR) ||
  490. mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
  491. mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
  492. req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
  493. tmp = (unsigned long)mrw;
  494. req.mrw_handle = cpu_to_le64(tmp);
  495. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  496. (void *)&resp, NULL, 0);
  497. if (rc)
  498. return rc;
  499. if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
  500. (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
  501. (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
  502. mrw->rkey = le32_to_cpu(resp.xid);
  503. else
  504. mrw->lkey = le32_to_cpu(resp.xid);
  505. return 0;
  506. }
  507. int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
  508. bool block)
  509. {
  510. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  511. struct cmdq_deregister_mr req;
  512. struct creq_deregister_mr_resp resp;
  513. u16 cmd_flags = 0;
  514. int rc;
  515. RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
  516. req.lkey = cpu_to_le32(mrw->lkey);
  517. rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
  518. (void *)&resp, NULL, block);
  519. if (rc)
  520. return rc;
  521. /* Free the qplib's MR memory */
  522. if (mrw->hwq.max_elements) {
  523. mrw->va = 0;
  524. mrw->total_size = 0;
  525. bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
  526. }
  527. return 0;
  528. }
/*
 * Register a memory region with the firmware (REGISTER_MR). When
 * @num_pbls is non-zero a host HWQ is (re)built first to hold the page
 * list from @pbl_tbl; otherwise the MR's existing level/PBL is used.
 *
 * Returns 0 on success, -ENOMEM on page-list/allocation failures, or
 * the firmware command status (the HWQ is freed again on failure).
 */
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      u64 *pbl_tbl, int num_pbls, bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_register_mr req;
	struct creq_register_mr_resp resp;
	u16 cmd_flags = 0, level;
	int pg_ptrs, pages, i, rc;
	dma_addr_t **pbl_ptr;
	u32 pg_size;

	if (num_pbls) {
		/* Round up to the HWQ's power-of-two allocation model. */
		pg_ptrs = roundup_pow_of_two(num_pbls);
		pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
		if (!pages)
			pages++;

		if (pages > MAX_PBL_LVL_1_PGS) {
			dev_err(&res->pdev->dev, "QPLIB: SP: Reg MR pages ");
			dev_err(&res->pdev->dev,
				"requested (0x%x) exceeded max (0x%x)",
				pages, MAX_PBL_LVL_1_PGS);
			return -ENOMEM;
		}

		/* Free the hwq if it already exist, must be a rereg */
		if (mr->hwq.max_elements)
			bnxt_qplib_free_hwq(res->pdev, &mr->hwq);

		mr->hwq.max_elements = pages;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
					       &mr->hwq.max_elements,
					       PAGE_SIZE, 0, PAGE_SIZE,
					       HWQ_TYPE_CTX);
		if (rc) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR memory allocation failed");
			return -ENOMEM;
		}
		/* Write to the hwq */
		pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;
		for (i = 0; i < num_pbls; i++)
			/* Page-aligned address plus the PTE valid bit. */
			pbl_ptr[PTR_PG(i)][PTR_IDX(i)] =
				(pbl_tbl[i] & PAGE_MASK) | PTU_PTE_VALID;
	}

	RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);

	/* Configure the request */
	if (mr->hwq.level == PBL_LVL_MAX) {
		/* No page list was built: firmware level 0. */
		level = 0;
		req.pbl = 0;
		pg_size = PAGE_SIZE;
	} else {
		/* Firmware levels are offset by one from the HWQ levels. */
		level = mr->hwq.level + 1;
		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
		pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
	}
	/* Pack PBL level and log2(page size) into one field. */
	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
			       ((ilog2(pg_size) <<
				 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	req.access = (mr->flags & 0xFFFF);
	req.va = cpu_to_le64(mr->va);
	req.key = cpu_to_le32(mr->lkey);
	req.mr_size = cpu_to_le64(mr->total_size);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, block);
	if (rc)
		goto fail;

	return 0;

fail:
	/* Undo the HWQ allocation performed above on command failure. */
	if (mr->hwq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
	return rc;
}
  599. int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
  600. struct bnxt_qplib_frpl *frpl,
  601. int max_pg_ptrs)
  602. {
  603. int pg_ptrs, pages, rc;
  604. /* Re-calculate the max to fit the HWQ allocation model */
  605. pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
  606. pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
  607. if (!pages)
  608. pages++;
  609. if (pages > MAX_PBL_LVL_1_PGS)
  610. return -ENOMEM;
  611. frpl->hwq.max_elements = pages;
  612. rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, 0,
  613. &frpl->hwq.max_elements, PAGE_SIZE, 0,
  614. PAGE_SIZE, HWQ_TYPE_CTX);
  615. if (!rc)
  616. frpl->max_pg_ptrs = pg_ptrs;
  617. return rc;
  618. }
  619. int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
  620. struct bnxt_qplib_frpl *frpl)
  621. {
  622. bnxt_qplib_free_hwq(res->pdev, &frpl->hwq);
  623. return 0;
  624. }
  625. int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
  626. {
  627. struct bnxt_qplib_rcfw *rcfw = res->rcfw;
  628. struct cmdq_map_tc_to_cos req;
  629. struct creq_map_tc_to_cos_resp resp;
  630. u16 cmd_flags = 0;
  631. RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
  632. req.cos0 = cpu_to_le16(cids[0]);
  633. req.cos1 = cpu_to_le16(cids[1]);
  634. bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
  635. 0);
  636. return 0;
  637. }