/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);

/* PBL */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "QPLIB: PBL free pg_arr[%d] empty?!",
					 i);
			pbl->pg_arr[i] = NULL;
		}
	}
	kfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	kfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}
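
/*
 * Build a Page Buffer List of @pages pages.  Without a scatterlist the
 * pages are zeroed DMA-coherent kernel allocations; with @sghead the PBL
 * simply records the caller's already-mapped user-memory (umem) pages.
 */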
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
	struct scatterlist *sg;
	bool is_umem = false;
	int i;

	/* page ptr arrays */
	pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pbl->pg_map_arr) {
		kfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = pg_size;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			memset(pbl->pg_arr[i], 0, pbl->pg_size);
			pbl->pg_count++;
		}
	} else {
		i = 0;
		is_umem = true;
		for_each_sg(sghead, sg, pages, i) {
			pbl->pg_map_arr[i] = sg_dma_address(sg);
			pbl->pg_arr[i] = sg_virt(sg);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	}

	return 0;

fail:
	__free_pbl(pdev, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(pdev, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}
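
/*
 * A hardware queue is described by up to three PBL levels: PBL_LVL_0 is a
 * single page, PBL_LVL_1 adds one level of indirection and PBL_LVL_2 two.
 * The leaf level holds the queue pages themselves; for HWQ_TYPE_QUEUE the
 * final entries are tagged PTU_PTE_LAST / PTU_PTE_NEXT_TO_LAST so the
 * hardware can recognize the end of the ring.
 */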
/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
			      struct scatterlist *sghead, int nmap,
			      u32 *elements, u32 element_size, u32 aux,
			      u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
	u32 pages, slots, size, aux_pages = 0, aux_size = 0;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	int i, rc;

	hwq->level = PBL_LVL_MAX;

	slots = roundup_pow_of_two(*elements);
	if (aux) {
		aux_size = roundup_pow_of_two(aux);
		aux_pages = (slots * aux_size) / pg_size;
		if ((slots * aux_size) % pg_size)
			aux_pages++;
	}
	size = roundup_pow_of_two(element_size);

	if (!sghead) {
		hwq->is_user = false;
		pages = (slots * size) / pg_size + aux_pages;
		if ((slots * size) % pg_size)
			pages++;
		if (!pages)
			return -EINVAL;
	} else {
		hwq->is_user = true;
		pages = nmap;
	}

	/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
	if (sghead && (pages == MAX_PBL_LVL_0_PGS))
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
				 pages, pg_size);
	else
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
	if (rc)
		goto fail;

	hwq->level = PBL_LVL_0;

	if (pages > MAX_PBL_LVL_0_PGS) {
		if (pages > MAX_PBL_LVL_1_PGS) {
			/* 2 levels of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
					 MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PDE_VALID;
			hwq->level = PBL_LVL_1;

			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;

			/* Fill in lvl1 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_2;
		} else {
			u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
						PTU_PTE_VALID;

			/* 1 level of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_1;
		}
	}

	hwq->pdev = pdev;
	spin_lock_init(&hwq->lock);
	hwq->prod = 0;
	hwq->cons = 0;
	*elements = hwq->max_elements = slots;
	hwq->element_size = size;

	/* For direct access to the elements */
	hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

	return 0;

fail:
	bnxt_qplib_free_hwq(pdev, hwq);
	return -ENOMEM;
}

/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
	bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
	bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *	Context tables are memories used by the chip firmware.
 *	The six tables defined are:
 *		QPC ctx - holds QP states
 *		MRW ctx - holds memory region and window contexts
 *		SRQ ctx - holds shared RQ states
 *		CQ ctx  - holds completion queue states
 *		TQM ctx - holds Tx Queue Manager context
 *		TIM ctx - holds timer context
 *	Depending on the size of the table requested, either a single-page
 *	Buffer List or a one- to two-stage indirection Page Directory List
 *	plus one PBL is used:
 *		for 0      < ctx size <= 1 page,    no indirection is used;
 *		for 1 page < ctx size <= 512 pages, one level is used;
 *		for 512    < ctx size <= max,       two levels are used.
 *	(With 4 KB pages and 8-byte directory entries, one directory page
 *	addresses 512 pages, hence the 512-page threshold.)
 * Returns:
 *	0 on success, else a negative errno value.
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn)
{
	int i, j, k, rc = 0;
	int fnz_idx = -1;
	__le64 **pbl_ptr;

	if (virt_fn)
		goto stats_alloc;

	/* QPC Tables */
	ctx->qpc_tbl.max_elements = ctx->qpc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
				       &ctx->qpc_tbl.max_elements,
				       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* MRW Tables */
	ctx->mrw_tbl.max_elements = ctx->mrw_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
				       &ctx->mrw_tbl.max_elements,
				       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* SRQ Tables */
	ctx->srqc_tbl.max_elements = ctx->srqc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
				       &ctx->srqc_tbl.max_elements,
				       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* CQ Tables */
	ctx->cq_tbl.max_elements = ctx->cq_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
				       &ctx->cq_tbl.max_elements,
				       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* TQM Buffer */
	ctx->tqm_pde.max_elements = 512;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
				       &ctx->tqm_pde.max_elements, sizeof(u64),
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!ctx->tqm_count[i])
			continue;
		ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
					       ctx->tqm_count[i];
		rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
					       &ctx->tqm_tbl[i].max_elements, 1,
					       0, PAGE_SIZE, HWQ_TYPE_CTX);
		if (rc)
			goto fail;
	}
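
	/*
	 * Link each TQM ring into the TQM page directory; each ring owns a
	 * block of MAX_TQM_ALLOC_BLK_SIZE PDE slots.  A two-level ring
	 * contributes its PBL_LVL_1 directory pages, anything smaller just
	 * its first PBL_LVL_0 page.
	 */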
	pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		if (!ctx->tqm_tbl[i].max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i;
		switch (ctx->tqm_tbl[i].level) {
		case PBL_LVL_2:
			for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
			     k++)
				pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
				  cpu_to_le64(
				    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
				    | PTU_PTE_VALID);
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
				ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
				PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
			     PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
				       &ctx->tim_tbl.max_elements, 1,
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(pdev, ctx);
	return rc;
}

/* GUID */
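/*
 * MAC-48 to EUI-64: flip the universal/local bit of the first octet and
 * splice 0xFF, 0xFE between the OUI and NIC halves of the MAC address.
 */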
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->max = max;
	return 0;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}
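
/*
 * Deregister any GIDs still known to the firmware, then reset the table;
 * hw_id entries are set to 0xFFFF to mark the slots unused.
 */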
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}

/* PDs */
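/*
 * The PD and DPI bitmaps are kept inverted: a set bit marks a free entry
 * (the tables start out filled with 0xFF), so allocating is
 * find_first_bit() + clear_bit() and freeing is test_and_set_bit().
 */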
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

/* DPIs */
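/*
 * A DPI is a doorbell page index into the doorbell BAR: dbr is the kernel
 * ioremap'ed address of the page, umdbr the physical BAR address of the
 * same page (kept around for mappings created outside the kernel mapping,
 * e.g. user-space doorbells).
 */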
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}
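
/*
 * Map the doorbell BAR.  The first @dbr_offset bytes (the L2 driver's
 * doorbell space, dev_attr->l2_db_size) are skipped; the remainder is
 * carved into PAGE_SIZE doorbell pages handed out via the DPI bitmap.
 */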
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev,
			"QPLIB: BAR region %d resc start failed", dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
			dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
						  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"QPLIB: FP: DBR BAR region %d mapping failed",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl) {
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
		dev_err(&res->pdev->dev,
			"QPLIB: DPI app tbl allocation failed");
		return -ENOMEM;
	}

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		dev_err(&res->pdev->dev,
			"QPLIB: DPI tbl allocation failed for size = %d",
			bytes);
		return -ENOMEM;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;
}

/* PKEYs */
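/*
 * init programs the one default partition key, 0xFFFF; cleanup zeroes the
 * table again.
 */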
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
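/*
 * One DMA-coherent ctx_hw_stats block per function.  fw_id is initialized
 * to -1, i.e. no firmware stats context has been assigned yet.
 */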
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = sizeof(struct ctx_hw_stats);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
		return -ENOMEM;
	}
	return 0;
}
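
/*
 * Resource-table lifecycle: bnxt_qplib_alloc_res() builds the SGID, PKEY,
 * PD and DPI tables; bnxt_qplib_init_res() programs their defaults;
 * bnxt_qplib_cleanup_res() undoes init and bnxt_qplib_free_res() undoes
 * alloc.
 */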
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

	res->netdev = NULL;
	res->pdev = NULL;
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}