/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"

struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);

/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct list_head *clist;
	struct list_head *tlist;
	struct i40iw_chunk *chunk;
	struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

	if (pinfo->pool) {
		list_for_each_safe(clist, tlist, &pinfo->clist) {
			chunk = list_entry(clist, struct i40iw_chunk, list);
			if (chunk->type == I40IW_VMALLOC)
				i40iw_free_vmalloc_mem(dev->hw, chunk);
			kfree(chunk);
		}
		gen_pool_destroy(pinfo->pool);
	}
}
/**
 * i40iw_hmc_init_pble - Initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_info *hmc_info;
	u32 fpm_idx = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
	/*
	 * Start the pbles on a 4K boundary: fpm_idx is the number of
	 * 8-byte pbles skipped to reach the next 4K-aligned address.
	 */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

	pble_rsrc->unallocated_pble =
	    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);

	pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
	pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (!pble_rsrc->pinfo.pool)
		goto error;

	if (add_pble_pool(dev, pble_rsrc))
		goto error;

	return 0;

error:
	i40iw_destroy_pble_pool(dev, pble_rsrc);
	return I40IW_ERR_NO_MEMORY;
}
/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
				 struct sd_pd_idx *idx)
{
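	/*
	 * An sd entry covers one 2MB direct backing page
	 * (I40IW_HMC_DIRECT_BP_SIZE) and a pd entry covers one 4KB page
	 * (I40IW_HMC_PAGED_BP_SIZE); rel_pd_idx is the pd's position
	 * within its sd.
	 */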
	idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
	idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}
/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_add_page_info *info)
{
	enum i40iw_status_code ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		if (dev->is_pf) {
			ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
							    info->idx.sd_idx,
							    I40IW_SD_TYPE_DIRECT,
							    I40IW_HMC_DIRECT_BP_SIZE);
			if (ret_code)
				return ret_code;
			chunk->type = I40IW_DMA_COHERENT;
		}
	}
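	/* carve the chunk out of the sd's single 2MB backing page */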
	offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
		    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
	return 0;
}
/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
	int i;

	if (!chunk->pg_cnt)
		goto done;
	for (i = 0; i < chunk->pg_cnt; i++)
		dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
	kfree(chunk->dmaaddrs);
	chunk->dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}
/**
 * i40iw_get_vmalloc_mem - get 2MB page for sd
 * @hw: hw struct
 * @chunk: chunk to add
 * @pg_cnt: number of 4K pages
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
						    struct i40iw_chunk *chunk,
						    int pg_cnt)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
	struct page *page;
	u8 *addr;
	u32 size;
	int i;
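	/* one 8-byte dma address per 4K page (assumes 64-bit dma_addr_t) */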
	chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
	if (!chunk->dmaaddrs)
		return I40IW_ERR_NO_MEMORY;
	size = PAGE_SIZE * pg_cnt;
	chunk->vaddr = vmalloc(size);
	if (!chunk->vaddr) {
		kfree(chunk->dmaaddrs);
		chunk->dmaaddrs = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	chunk->size = size;
	addr = (u8 *)chunk->vaddr;
	for (i = 0; i < pg_cnt; i++) {
		page = vmalloc_to_page((void *)addr);
		if (!page)
			break;
		chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
			break;
		addr += PAGE_SIZE;
	}

	chunk->pg_cnt = i;
	chunk->type = I40IW_VMALLOC;
	if (i == pg_cnt)
		return 0;

	i40iw_free_vmalloc_mem(hw, chunk);
	return I40IW_ERR_NO_MEMORY;
}
/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
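	/* pbles are 8 bytes each, so the byte offset >> 3 is the index */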
	return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}
/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc,
					   struct i40iw_add_page_info *info)
{
	u8 *addr;
	struct i40iw_dma_mem mem;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_manage_vf_pble_info vf_pble_info;
	enum i40iw_status_code status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;
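	/* vmalloc the backing store and dma-map each 4K page for a paged sd */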
	status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
	if (status)
		return I40IW_ERR_NO_MEMORY;
	status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
					  info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
					  I40IW_HMC_DIRECT_BP_SIZE);
	if (status) {
		i40iw_free_vmalloc_mem(dev->hw, chunk);
		return status;
	}
	if (!dev->is_pf) {
		status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
						     fpm_to_idx(pble_rsrc,
								pble_rsrc->next_fpm_addr),
						     (info->pages << PBLE_512_SHIFT));
		if (status) {
			i40iw_pr_err("failed to allocate PBLEs in the PF, error %i\n", status);
			i40iw_free_vmalloc_mem(dev->hw, chunk);
			return status;
		}
	}
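	/* hand each dma-mapped 4K page to the HMC as a pd entry */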
	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = chunk->dmaaddrs[i];
		mem.size = PAGE_SIZE;
		mem.va = (void *)(addr);
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
			if (status)
				goto error;
			addr += PAGE_SIZE;
		} else {
			i40iw_pr_err("pd entry is valid, expected invalid\n");
		}
	}
	if (!dev->is_pf) {
		vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
		vf_pble_info.inv_pd_ent = false;
		vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
		vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
		vf_pble_info.sd_index = info->idx.sd_idx;
		status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
						    &vf_pble_info, true);
		if (status) {
			i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
			goto error;
		}
	}
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	i40iw_free_vmalloc_mem(dev->hw, chunk);
	return status;
}
/**
 * add_pble_pool - add a sd entry for pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_chunk *chunk;
	struct i40iw_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return I40IW_ERR_NO_MEMORY;
	if (pble_rsrc->next_fpm_addr & 0xfff) {
		i40iw_pr_err("next fpm_addr %llx not 4K aligned\n", pble_rsrc->next_fpm_addr);
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
	}
	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return I40IW_ERR_NO_MEMORY;
	hmc_info = dev->hmc_info;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
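	/*
	 * take as many 4K pds as remain in this sd, capped by the
	 * unallocated pble count (512 pbles per 4K page)
	 */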
	pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
			idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	if (!pages) {
		ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
		goto error;
	}
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid) {
		sd_entry_type = (!idx->rel_pd_idx &&
				 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
				 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
	} else {
		sd_entry_type = sd_entry->entry_type;
	}
	i40iw_debug(dev, I40IW_DEBUG_PBLE,
		    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
		    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
		    sd_entry_type, sd_entry->valid);

	if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(dev, pble_rsrc, &info);
	if (ret_code)
		sd_entry_type = I40IW_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(dev, pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}
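	/*
	 * register the chunk with the gen_pool, using the fpm address
	 * as the "physical" address so gen_pool_virt_to_phys() hands
	 * back fpm addresses for allocations
	 */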
	if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
			      (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
		i40iw_pr_err("could not allocate memory by gen_pool_add_virt()\n");
		ret_code = I40IW_ERR_NO_MEMORY;
		goto error;
	}
	pble_rsrc->next_fpm_addr += chunk->size;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (chunk->size >> 3);
	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
			sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
	if (sd_entry->valid)
		return 0;
	if (dev->is_pf)
		ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
					    sd_reg_val, idx->sd_idx,
					    sd_entry->entry_type, true);
	if (ret_code) {
		i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
		goto error;
	}

	sd_entry->valid = true;
	return 0;

error:
	kfree(chunk);
	return ret_code;
}
/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		      struct i40iw_pble_alloc *palloc)
{
	u32 i;
	struct gen_pool *pool;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf = lvl2->leaf;

	pool = pble_rsrc->pinfo.pool;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
		else
			break;
	}

	if (root->addr)
		gen_pool_free(pool, root->addr, (root->cnt << 3));

	kfree(lvl2->leaf);
	lvl2->leaf = NULL;
}
/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc,
					    struct gen_pool *pool)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf;

	/* number of full 512-pble (4K) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
	if (!leaf)
		return I40IW_ERR_NO_MEMORY;
	lvl2->leaf = leaf;
	/* allocate pbles for the root */
	root->addr = gen_pool_alloc(pool, (total << 3));
	if (!root->addr) {
		kfree(lvl2->leaf);
		lvl2->leaf = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	root->idx = fpm_to_idx(pble_rsrc,
			       (u64)gen_pool_virt_to_phys(pool, root->addr));
	root->cnt = total;
	addr = (u64 *)root->addr;
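	/* fill the root page with each leaf's fpm index */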
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
		leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
		if (!leaf->addr)
			goto error;
		leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}
	palloc->level = I40IW_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);
	return I40IW_ERR_NO_MEMORY;
}
/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc)
{
	u64 *addr;
	struct gen_pool *pool;
	struct i40iw_pble_info *lvl1 = &palloc->level1;

	pool = pble_rsrc->pinfo.pool;
	addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));

	if (!addr)
		return I40IW_ERR_NO_MEMORY;

	palloc->level = I40IW_LEVEL_1;
	lvl1->addr = (unsigned long)addr;
	lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
			       (unsigned long)addr));
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;
	return 0;
}
/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_pble_rsrc *pble_rsrc,
							struct i40iw_pble_alloc *palloc,
							struct gen_pool *pool)
{
	enum i40iw_status_code status = 0;

	status = get_lvl1_pble(dev, pble_rsrc, palloc);
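	/*
	 * fall back to a level 2 (root + leaves) layout only when one
	 * contiguous level 1 range is unavailable and the request
	 * spans more than a single 4K page of pbles
	 */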
	if (status && (palloc->total_cnt > PBLE_PER_PAGE))
		status = get_lvl2_pble(pble_rsrc, palloc, pool);
	return status;
}
/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: number of pbles requested
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
				      struct i40iw_hmc_pble_rsrc *pble_rsrc,
				      struct i40iw_pble_alloc *palloc,
				      u32 pble_cnt)
{
	struct gen_pool *pool;
	enum i40iw_status_code status = 0;
	u32 max_sds = 0;
	int i;

	pool = pble_rsrc->pinfo.pool;
	palloc->total_cnt = pble_cnt;
	palloc->level = I40IW_LEVEL_0;
	/* check first to see if we can get pbles without acquiring additional sds */
	status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
	if (!status)
		goto exit;
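	/*
	 * an sd covers 2MB, i.e. 1 << 18 pbles of 8 bytes; +1 covers a
	 * partially consumed sd
	 */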
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_pool(dev, pble_rsrc);
		if (status)
			break;
		status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
		if (!status)
			break;
	}

exit:
	if (!status)
		pble_rsrc->stats_alloc_ok++;
	else
		pble_rsrc->stats_alloc_fail++;
	return status;
}
/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		     struct i40iw_pble_alloc *palloc)
{
	struct gen_pool *pool;

	pool = pble_rsrc->pinfo.pool;
	if (palloc->level == I40IW_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		gen_pool_free(pool, palloc->level1.addr,
			      (palloc->level1.cnt << 3));
	pble_rsrc->stats_alloc_freed++;
}