i40iw_hmc.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821
  1. /*******************************************************************************
  2. *
  3. * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenFabrics.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. *
  33. *******************************************************************************/
  34. #include "i40iw_osdep.h"
  35. #include "i40iw_register.h"
  36. #include "i40iw_status.h"
  37. #include "i40iw_hmc.h"
  38. #include "i40iw_d.h"
  39. #include "i40iw_type.h"
  40. #include "i40iw_p.h"
  41. #include "i40iw_vf.h"
  42. #include "i40iw_virtchnl.h"
  43. /**
  44. * i40iw_find_sd_index_limit - finds segment descriptor index limit
  45. * @hmc_info: pointer to the HMC configuration information structure
  46. * @type: type of HMC resources we're searching
  47. * @index: starting index for the object
  48. * @cnt: number of objects we're trying to create
  49. * @sd_idx: pointer to return index of the segment descriptor in question
  50. * @sd_limit: pointer to return the maximum number of segment descriptors
  51. *
  52. * This function calculates the segment descriptor index and index limit
  53. * for the resource defined by i40iw_hmc_rsrc_type.
  54. */
  55. static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
  56. u32 type,
  57. u32 idx,
  58. u32 cnt,
  59. u32 *sd_idx,
  60. u32 *sd_limit)
  61. {
  62. u64 fpm_addr, fpm_limit;
  63. fpm_addr = hmc_info->hmc_obj[(type)].base +
  64. hmc_info->hmc_obj[type].size * idx;
  65. fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
  66. *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
  67. *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
  68. *sd_limit += 1;
  69. }
  70. /**
  71. * i40iw_find_pd_index_limit - finds page descriptor index limit
  72. * @hmc_info: pointer to the HMC configuration information struct
  73. * @type: HMC resource type we're examining
  74. * @idx: starting index for the object
  75. * @cnt: number of objects we're trying to create
  76. * @pd_index: pointer to return page descriptor index
  77. * @pd_limit: pointer to return page descriptor index limit
  78. *
  79. * Calculates the page descriptor index and index limit for the resource
  80. * defined by i40iw_hmc_rsrc_type.
  81. */
  82. static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
  83. u32 type,
  84. u32 idx,
  85. u32 cnt,
  86. u32 *pd_idx,
  87. u32 *pd_limit)
  88. {
  89. u64 fpm_adr, fpm_limit;
  90. fpm_adr = hmc_info->hmc_obj[type].base +
  91. hmc_info->hmc_obj[type].size * idx;
  92. fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);
  93. *(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);
  94. *(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
  95. *(pd_limit) += 1;
  96. }
  97. /**
  98. * i40iw_set_sd_entry - setup entry for sd programming
  99. * @pa: physical addr
  100. * @idx: sd index
  101. * @type: paged or direct sd
  102. * @entry: sd entry ptr
  103. */
  104. static inline void i40iw_set_sd_entry(u64 pa,
  105. u32 idx,
  106. enum i40iw_sd_entry_type type,
  107. struct update_sd_entry *entry)
  108. {
  109. entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
  110. (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
  111. I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
  112. (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
  113. entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
  114. }
  115. /**
  116. * i40iw_clr_sd_entry - setup entry for sd clear
  117. * @idx: sd index
  118. * @type: paged or direct sd
  119. * @entry: sd entry ptr
  120. */
  121. static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
  122. struct update_sd_entry *entry)
  123. {
  124. entry->data = (I40IW_HMC_MAX_BP_COUNT <<
  125. I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
  126. (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
  127. I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
  128. entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
  129. }
  130. /**
  131. * i40iw_hmc_sd_one - setup 1 sd entry for cqp
  132. * @dev: pointer to the device structure
  133. * @hmc_fn_id: hmc's function id
  134. * @pa: physical addr
  135. * @sd_idx: sd index
  136. * @type: paged or direct sd
  137. * @setsd: flag to set or clear sd
  138. */
  139. enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
  140. u8 hmc_fn_id,
  141. u64 pa, u32 sd_idx,
  142. enum i40iw_sd_entry_type type,
  143. bool setsd)
  144. {
  145. struct i40iw_update_sds_info sdinfo;
  146. sdinfo.cnt = 1;
  147. sdinfo.hmc_fn_id = hmc_fn_id;
  148. if (setsd)
  149. i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
  150. else
  151. i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
  152. return dev->cqp->process_cqp_sds(dev, &sdinfo);
  153. }
  154. /**
  155. * i40iw_hmc_sd_grp - setup group od sd entries for cqp
  156. * @dev: pointer to the device structure
  157. * @hmc_info: pointer to the HMC configuration information struct
  158. * @sd_index: sd index
  159. * @sd_cnt: number of sd entries
  160. * @setsd: flag to set or clear sd
  161. */
  162. static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
  163. struct i40iw_hmc_info *hmc_info,
  164. u32 sd_index,
  165. u32 sd_cnt,
  166. bool setsd)
  167. {
  168. struct i40iw_hmc_sd_entry *sd_entry;
  169. struct i40iw_update_sds_info sdinfo;
  170. u64 pa;
  171. u32 i;
  172. enum i40iw_status_code ret_code = 0;
  173. memset(&sdinfo, 0, sizeof(sdinfo));
  174. sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
  175. for (i = sd_index; i < sd_index + sd_cnt; i++) {
  176. sd_entry = &hmc_info->sd_table.sd_entry[i];
  177. if (!sd_entry ||
  178. (!sd_entry->valid && setsd) ||
  179. (sd_entry->valid && !setsd))
  180. continue;
  181. if (setsd) {
  182. pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
  183. sd_entry->u.pd_table.pd_page_addr.pa :
  184. sd_entry->u.bp.addr.pa;
  185. i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
  186. &sdinfo.entry[sdinfo.cnt]);
  187. } else {
  188. i40iw_clr_sd_entry(i, sd_entry->entry_type,
  189. &sdinfo.entry[sdinfo.cnt]);
  190. }
  191. sdinfo.cnt++;
  192. if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
  193. ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
  194. if (ret_code) {
  195. i40iw_debug(dev, I40IW_DEBUG_HMC,
  196. "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
  197. ret_code);
  198. return ret_code;
  199. }
  200. sdinfo.cnt = 0;
  201. }
  202. }
  203. if (sdinfo.cnt)
  204. ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
  205. return ret_code;
  206. }
  207. /**
  208. * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
  209. * @dev: pointer to the device structure
  210. * @hmc_fn_id: hmc's function id
  211. */
  212. struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
  213. {
  214. struct i40iw_vfdev *vf_dev = NULL;
  215. u16 idx;
  216. for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
  217. if (dev->vf_dev[idx] &&
  218. ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
  219. vf_dev = dev->vf_dev[idx];
  220. break;
  221. }
  222. }
  223. return vf_dev;
  224. }
  225. /**
  226. * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
  227. * @dev: pointer to the device structure
  228. * @hmc_fn_id: hmc's function id
  229. */
  230. struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
  231. u8 hmc_fn_id)
  232. {
  233. struct i40iw_hmc_info *hmc_info = NULL;
  234. u16 idx;
  235. for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
  236. if (dev->vf_dev[idx] &&
  237. ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
  238. hmc_info = &dev->vf_dev[idx]->hmc_info;
  239. break;
  240. }
  241. }
  242. return hmc_info;
  243. }
  244. /**
  245. * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
  246. * @dev: pointer to the device structure
  247. * @info: create obj info
  248. */
  249. static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
  250. struct i40iw_hmc_create_obj_info *info)
  251. {
  252. if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
  253. return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
  254. if ((info->start_idx + info->count) >
  255. info->hmc_info->hmc_obj[info->rsrc_type].cnt)
  256. return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
  257. if (!info->add_sd_cnt)
  258. return 0;
  259. return i40iw_hmc_sd_grp(dev, info->hmc_info,
  260. info->hmc_info->sd_indexes[0],
  261. info->add_sd_cnt, true);
  262. }
/**
 * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.  For a VF the work is delegated to the PF via
 * the virtual channel.  On failure, every SD added by this call is
 * unwound before returning.
 */
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_create_obj_info *info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum i40iw_status_code ret_code = 0;

	/* reject ranges that start or extend past the configured object count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
			    __func__, info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}

	/* VFs delegate object creation to the PF over the virtual channel */
	if (!dev->is_pf)
		return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count,
				  &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
						    j,
						    info->entry_type,
						    I40IW_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];

		/* For paged SDs belonging to the PF's own hmc_info (and not
		 * PBLE, which is handled elsewhere), populate the PD entries
		 * of this SD that fall inside the requested PD range.
		 */
		if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
		    ((dev->hmc_info == info->hmc_info) &&
		     (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
			pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      (j + 1) * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* unwind the PDs added in this SD, newest first */
				while (i && (i > pd_idx1)) {
					i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
							   info->is_pf);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		/* record each newly valid SD so finish_add can program it */
		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return i40iw_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	/* roll back every SD handled so far, newest first */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40IW_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      (j - 1) * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
			/* NOTE(review): i is a PD index here while
			 * i40iw_prep_remove_pd_page's kernel-doc describes its
			 * idx parameter as an SD index — confirm intended.
			 */
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40iw_prep_remove_pd_page(info->hmc_info, i);
			break;
		case I40IW_SD_TYPE_DIRECT:
			i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40IW_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
	return ret_code;
}
  362. /**
  363. * i40iw_finish_del_sd_reg - delete sd entries for objects
  364. * @dev: pointer to the device structure
  365. * @info: dele obj info
  366. * @reset: true if called before reset
  367. */
  368. static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
  369. struct i40iw_hmc_del_obj_info *info,
  370. bool reset)
  371. {
  372. struct i40iw_hmc_sd_entry *sd_entry;
  373. enum i40iw_status_code ret_code = 0;
  374. u32 i, sd_idx;
  375. struct i40iw_dma_mem *mem;
  376. if (dev->is_pf && !reset)
  377. ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
  378. info->hmc_info->sd_indexes[0],
  379. info->del_sd_cnt, false);
  380. if (ret_code)
  381. i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
  382. for (i = 0; i < info->del_sd_cnt; i++) {
  383. sd_idx = info->hmc_info->sd_indexes[i];
  384. sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
  385. if (!sd_entry)
  386. continue;
  387. mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
  388. &sd_entry->u.pd_table.pd_page_addr :
  389. &sd_entry->u.bp.addr;
  390. if (!mem || !mem->va)
  391. i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
  392. else
  393. i40iw_free_dma_mem(dev->hw, mem);
  394. }
  395. return ret_code;
  396. }
/**
 * i40iw_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 */
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_del_obj_info *info,
					    bool reset)
{
	struct i40iw_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;

	/* reject ranges that start or extend past the configured object count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->count,
			    info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}

	/* VFs delegate deletion to the PF; PBLE additionally requires the
	 * local PD/SD teardown below, so only non-PBLE types return here.
	 */
	if (!dev->is_pf) {
		ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
						      info->count);
		if (info->rsrc_type != I40IW_HMC_IW_PBLE)
			return ret_code;
	}

	/* phase 1: remove the backing pages of every valid PD in the range */
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    I40IW_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
						      info->is_pf);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
				return ret_code;
			}
		}
	}

	/* phase 2: mark SDs invalid, collecting their indexes for the final
	 * CQP clear done in i40iw_finish_del_sd_reg
	 */
	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
		return I40IW_ERR_INVALID_SD_INDEX;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40IW_SD_TYPE_DIRECT:
			ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case I40IW_SD_TYPE_PAGED:
			ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		default:
			break;
		}
	}
	return i40iw_finish_del_sd_reg(dev, info, reset);
}
/**
 * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 *
 * Allocates the backing DMA memory for a not-yet-valid SD entry (a PD page
 * for paged mode, @direct_mode_sz bytes for direct mode) and, for paged
 * mode, the virtual memory holding its PD entry array.  Direct SDs also get
 * their backing-page refcount bumped.  Note: the entry's valid flag is NOT
 * set here; the caller does that.
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40IW_SD_TYPE_PAGED) {
			/* 512 PD entries per SD — presumably matches
			 * I40IW_HMC_PD_CNT_IN_SD; confirm against i40iw_d.h
			 */
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							sd_entry->u.pd_table.pd_entry_virt_mem.va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	/* on any failure, release the DMA memory acquired above */
	if (ret_code)
		if (dma_mem_alloc_done)
			i40iw_free_dma_mem(hw, &mem);

	return ret_code;
}
/**
 * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Mark the entry valid in i40iw_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous and
 *	   aligned on 4K boundary and zeroed memory.
 *	2. It should be 4K in size.
 */
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 pd_index,
						struct i40iw_dma_mem *rsrc_pg)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_dma_mem mem;
	struct i40iw_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	/* PDs only exist inside paged SDs; direct SDs are a silent no-op */
	sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		/* either adopt the caller-supplied page or allocate a fresh one */
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			ret_code = i40iw_allocate_dma_mem(hw, page,
							  I40IW_HMC_PAGED_BP_SIZE,
							  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				return ret_code;
			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;

		/* write the physical address with the low valid bit set into
		 * the PD page at this entry's slot
		 */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40IW_INC_PD_REFCNT(pd_table);

		/* invalidate the HW's cached copy of this PD */
		if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
			I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
		else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
			I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
						   hmc_info->hmc_fn_id);
	}
	I40IW_INC_BP_REFCNT(&pd_entry->bp);

	return 0;
}
/**
 * i40iw_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
 *	   (for direct address mode) invalid.
 *	2. Write to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrement the ref count for the pd _entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
	/* other users still hold the backing page; nothing more to do */
	if (pd_entry->bp.ref_cnt)
		return 0;

	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);

	/* clear this entry's slot in the PD page */
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));

	/* invalidate the HW's cached copy before freeing the memory.
	 * NOTE(review): the full idx is passed here, while
	 * i40iw_add_pd_table_entry passes rel_pd_idx — confirm intended.
	 */
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);

	/* caller-supplied (rsrc_pg) pages are not ours to free */
	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	/* last PD gone: release the PD entry array too */
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);

	return 0;
}
  671. /**
  672. * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
  673. * @hmc_info: pointer to the HMC configuration information structure
  674. * @idx: the page index
  675. */
  676. enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
  677. {
  678. struct i40iw_hmc_sd_entry *sd_entry;
  679. sd_entry = &hmc_info->sd_table.sd_entry[idx];
  680. I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
  681. if (sd_entry->u.bp.ref_cnt)
  682. return I40IW_ERR_NOT_READY;
  683. I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
  684. sd_entry->valid = false;
  685. return 0;
  686. }
  687. /**
  688. * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
  689. * @hmc_info: pointer to the HMC configuration information structure
  690. * @idx: segment descriptor index to find the relevant page descriptor
  691. */
  692. enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
  693. u32 idx)
  694. {
  695. struct i40iw_hmc_sd_entry *sd_entry;
  696. sd_entry = &hmc_info->sd_table.sd_entry[idx];
  697. if (sd_entry->u.pd_table.ref_cnt)
  698. return I40IW_ERR_NOT_READY;
  699. sd_entry->valid = false;
  700. I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
  701. return 0;
  702. }
  703. /**
  704. * i40iw_pf_init_vfhmc -
  705. * @vf_cnt_array: array of cnt values of iwarp hmc objects
  706. * @vf_hmc_fn_id: hmc function id ofr vf driver
  707. * @dev: pointer to i40iw_dev struct
  708. *
  709. * Called by pf driver to initialize hmc_info for vf driver instance.
  710. */
  711. enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
  712. u8 vf_hmc_fn_id,
  713. u32 *vf_cnt_array)
  714. {
  715. struct i40iw_hmc_info *hmc_info;
  716. enum i40iw_status_code ret_code = 0;
  717. u32 i;
  718. if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
  719. (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
  720. I40IW_MAX_PE_ENABLED_VF_COUNT)) {
  721. i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
  722. __func__, vf_hmc_fn_id);
  723. return I40IW_ERR_INVALID_HMCFN_ID;
  724. }
  725. ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
  726. if (ret_code)
  727. return ret_code;
  728. hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
  729. for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
  730. if (vf_cnt_array)
  731. hmc_info->hmc_obj[i].cnt =
  732. vf_cnt_array[i - I40IW_HMC_IW_QP];
  733. else
  734. hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
  735. return 0;
  736. }