/* i40e_lan_hmc.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2013 - 2018 Intel Corporation. */
  3. #include "i40e_osdep.h"
  4. #include "i40e_register.h"
  5. #include "i40e_type.h"
  6. #include "i40e_hmc.h"
  7. #include "i40e_lan_hmc.h"
  8. #include "i40e_prototype.h"
  9. /* lan specific interface functions */
  10. /**
  11. * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
  12. * @offset: base address offset needing alignment
  13. *
  14. * Aligns the layer 2 function private memory so it's 512-byte aligned.
  15. **/
  16. static u64 i40e_align_l2obj_base(u64 offset)
  17. {
  18. u64 aligned_offset = offset;
  19. if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
  20. aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
  21. (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
  22. return aligned_offset;
  23. }
  24. /**
  25. * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
  26. * @txq_num: number of Tx queues needing backing context
  27. * @rxq_num: number of Rx queues needing backing context
  28. * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
  29. * @fcoe_filt_num: number of FCoE filters needing backing context
  30. *
  31. * Calculates the maximum amount of memory for the function required, based
  32. * on the number of resources it must provide context for.
  33. **/
  34. static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
  35. u32 fcoe_cntx_num, u32 fcoe_filt_num)
  36. {
  37. u64 fpm_size = 0;
  38. fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
  39. fpm_size = i40e_align_l2obj_base(fpm_size);
  40. fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
  41. fpm_size = i40e_align_l2obj_base(fpm_size);
  42. fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
  43. fpm_size = i40e_align_l2obj_base(fpm_size);
  44. fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
  45. fpm_size = i40e_align_l2obj_base(fpm_size);
  46. return fpm_size;
  47. }
/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Returns 0 on success, I40E_ERR_INVALID_HMC_OBJ_COUNT if any requested
 * count exceeds the HW-reported maximum, or an allocation error code.
 *
 * Assumptions:
 * - HMC Resource Profile has been selected before calling this function.
 **/
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
			      u32 rxq_num, u32 fcoe_cntx_num,
			      u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	i40e_status ret_code = 0;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	/* object size registers hold a power-of-two exponent */
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information; base starts where the (aligned)
	 * Tx region ends
	 */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information; base follows the Rx region */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information; base follows the FCoE context region */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	/* only build the SD table on the first call; it is reused after */
	if (NULL == hw->hmc.sd_table.sd_entry) {
		/* one SD per 2MB direct backing page, rounded up */
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
  186. /**
  187. * i40e_remove_pd_page - Remove a page from the page descriptor table
  188. * @hw: pointer to the HW structure
  189. * @hmc_info: pointer to the HMC configuration information structure
  190. * @idx: segment descriptor index to find the relevant page descriptor
  191. *
  192. * This function:
  193. * 1. Marks the entry in pd table (for paged address mode) invalid
  194. * 2. write to register PMPDINV to invalidate the backing page in FV cache
  195. * 3. Decrement the ref count for pd_entry
  196. * assumptions:
  197. * 1. caller can deallocate the memory used by pd after this function
  198. * returns.
  199. **/
  200. static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
  201. struct i40e_hmc_info *hmc_info,
  202. u32 idx)
  203. {
  204. i40e_status ret_code = 0;
  205. if (!i40e_prep_remove_pd_page(hmc_info, idx))
  206. ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
  207. return ret_code;
  208. }
  209. /**
  210. * i40e_remove_sd_bp - remove a backing page from a segment descriptor
  211. * @hw: pointer to our HW structure
  212. * @hmc_info: pointer to the HMC configuration information structure
  213. * @idx: the page index
  214. *
  215. * This function:
  216. * 1. Marks the entry in sd table (for direct address mode) invalid
  217. * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
  218. * to 0) and PMSDDATAHIGH to invalidate the sd page
  219. * 3. Decrement the ref count for the sd_entry
  220. * assumptions:
  221. * 1. caller can deallocate the memory used by backing storage after this
  222. * function returns.
  223. **/
  224. static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
  225. struct i40e_hmc_info *hmc_info,
  226. u32 idx)
  227. {
  228. i40e_status ret_code = 0;
  229. if (!i40e_prep_remove_sd_bp(hmc_info, idx))
  230. ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
  231. return ret_code;
  232. }
/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 *
 * On partial failure, all SD entries allocated by this call (and any PD
 * backing pages under them) are unwound before returning the error.
 **/
static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	/* validate inputs before touching any tables */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd: clamp the global
			 * pd range to the slice covered by SD number j
			 */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		/* program the SD into HW only once per entry */
		if (!sd_entry->valid) {
			sd_entry->valid = true;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx, walking backwards so each
	 * SD's PD pages are freed before the SD itself
	 */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 *
 * For I40E_HMC_MODEL_DIRECT_PREFERRED, a failed direct-mode attempt falls
 * back to paged mode via the cross-case goto below.  On success, the FPM
 * base/count registers for all four LAN object types are programmed.
 **/
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
				   enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	i40e_status ret_code = 0;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		/* direct-preferred may retry as paged; direct-only may not */
		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
		       ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created.
	 * Base registers are expressed in 512-byte units.
	 */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 *
 * Teardown order matters: PD backing pages are removed first, then the
 * SD entries (direct backing pages or PD pages) that covered them.
 **/
static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	/* validate inputs before touching any tables */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* pass 1: remove valid PD backing pages under paged SDs */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		/* direct SDs have no PD table to walk */
		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	/* pass 2: remove the SD entries themselves */
	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}
  561. /**
  562. * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
  563. * @hw: pointer to the hw structure
  564. *
  565. * This must be called by drivers as they are shutting down and being
  566. * removed from the OS.
  567. **/
  568. i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
  569. {
  570. struct i40e_hmc_lan_delete_obj_info info;
  571. i40e_status ret_code;
  572. info.hmc_info = &hw->hmc;
  573. info.rsrc_type = I40E_HMC_LAN_FULL;
  574. info.start_idx = 0;
  575. info.count = 1;
  576. /* delete the object */
  577. ret_code = i40e_delete_lan_hmc_object(hw, &info);
  578. /* free the SD table entry for LAN */
  579. i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
  580. hw->hmc.sd_table.sd_cnt = 0;
  581. hw->hmc.sd_table.sd_entry = NULL;
  582. /* free memory used for hmc_obj */
  583. i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
  584. hw->hmc.hmc_obj = NULL;
  585. return ret_code;
  586. }
/* Expands to the byte offset and size of field @_ele within struct @_struct,
 * filling the first two members of a struct i40e_context_ele initializer.
 */
#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

/* Describes how one driver-side context field maps into the packed HW
 * HMC context image.
 */
struct i40e_context_ele {
	u16 offset;	/* byte offset of the field in the driver struct */
	u16 size_of;	/* size in bytes of the field in the driver struct */
	u16 width;	/* width in bits of the field in the HW context */
	u16 lsb;	/* bit position of the field's LSB in the HW context */
};
/* LAN Tx Queue Context: field layout of the HW Tx queue context image.
 * Each HW "line" is 128 bits wide; the all-zero entry terminates the table.
 */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};
/* LAN Rx Queue Context: field layout of the HW Rx queue context image.
 * The all-zero entry terminates the table.
 */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width   LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};
  647. /**
  648. * i40e_write_byte - replace HMC context byte
  649. * @hmc_bits: pointer to the HMC memory
  650. * @ce_info: a description of the struct to be read from
  651. * @src: the struct to be read from
  652. **/
  653. static void i40e_write_byte(u8 *hmc_bits,
  654. struct i40e_context_ele *ce_info,
  655. u8 *src)
  656. {
  657. u8 src_byte, dest_byte, mask;
  658. u8 *from, *dest;
  659. u16 shift_width;
  660. /* copy from the next struct field */
  661. from = src + ce_info->offset;
  662. /* prepare the bits and mask */
  663. shift_width = ce_info->lsb % 8;
  664. mask = (u8)(BIT(ce_info->width) - 1);
  665. src_byte = *from;
  666. src_byte &= mask;
  667. /* shift to correct alignment */
  668. mask <<= shift_width;
  669. src_byte <<= shift_width;
  670. /* get the current bits from the target bit string */
  671. dest = hmc_bits + (ce_info->lsb / 8);
  672. memcpy(&dest_byte, dest, sizeof(dest_byte));
  673. dest_byte &= ~mask; /* get the bits not changing */
  674. dest_byte |= src_byte; /* add in the new bits */
  675. /* put it all back */
  676. memcpy(dest, &dest_byte, sizeof(dest_byte));
  677. }
  678. /**
  679. * i40e_write_word - replace HMC context word
  680. * @hmc_bits: pointer to the HMC memory
  681. * @ce_info: a description of the struct to be read from
  682. * @src: the struct to be read from
  683. **/
  684. static void i40e_write_word(u8 *hmc_bits,
  685. struct i40e_context_ele *ce_info,
  686. u8 *src)
  687. {
  688. u16 src_word, mask;
  689. u8 *from, *dest;
  690. u16 shift_width;
  691. __le16 dest_word;
  692. /* copy from the next struct field */
  693. from = src + ce_info->offset;
  694. /* prepare the bits and mask */
  695. shift_width = ce_info->lsb % 8;
  696. mask = BIT(ce_info->width) - 1;
  697. /* don't swizzle the bits until after the mask because the mask bits
  698. * will be in a different bit position on big endian machines
  699. */
  700. src_word = *(u16 *)from;
  701. src_word &= mask;
  702. /* shift to correct alignment */
  703. mask <<= shift_width;
  704. src_word <<= shift_width;
  705. /* get the current bits from the target bit string */
  706. dest = hmc_bits + (ce_info->lsb / 8);
  707. memcpy(&dest_word, dest, sizeof(dest_word));
  708. dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
  709. dest_word |= cpu_to_le16(src_word); /* add in the new bits */
  710. /* put it all back */
  711. memcpy(dest, &dest_word, sizeof(dest_word));
  712. }
  713. /**
  714. * i40e_write_dword - replace HMC context dword
  715. * @hmc_bits: pointer to the HMC memory
  716. * @ce_info: a description of the struct to be read from
  717. * @src: the struct to be read from
  718. **/
  719. static void i40e_write_dword(u8 *hmc_bits,
  720. struct i40e_context_ele *ce_info,
  721. u8 *src)
  722. {
  723. u32 src_dword, mask;
  724. u8 *from, *dest;
  725. u16 shift_width;
  726. __le32 dest_dword;
  727. /* copy from the next struct field */
  728. from = src + ce_info->offset;
  729. /* prepare the bits and mask */
  730. shift_width = ce_info->lsb % 8;
  731. /* if the field width is exactly 32 on an x86 machine, then the shift
  732. * operation will not work because the SHL instructions count is masked
  733. * to 5 bits so the shift will do nothing
  734. */
  735. if (ce_info->width < 32)
  736. mask = BIT(ce_info->width) - 1;
  737. else
  738. mask = ~(u32)0;
  739. /* don't swizzle the bits until after the mask because the mask bits
  740. * will be in a different bit position on big endian machines
  741. */
  742. src_dword = *(u32 *)from;
  743. src_dword &= mask;
  744. /* shift to correct alignment */
  745. mask <<= shift_width;
  746. src_dword <<= shift_width;
  747. /* get the current bits from the target bit string */
  748. dest = hmc_bits + (ce_info->lsb / 8);
  749. memcpy(&dest_dword, dest, sizeof(dest_dword));
  750. dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
  751. dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
  752. /* put it all back */
  753. memcpy(dest, &dest_dword, sizeof(dest_dword));
  754. }
  755. /**
  756. * i40e_write_qword - replace HMC context qword
  757. * @hmc_bits: pointer to the HMC memory
  758. * @ce_info: a description of the struct to be read from
  759. * @src: the struct to be read from
  760. **/
  761. static void i40e_write_qword(u8 *hmc_bits,
  762. struct i40e_context_ele *ce_info,
  763. u8 *src)
  764. {
  765. u64 src_qword, mask;
  766. u8 *from, *dest;
  767. u16 shift_width;
  768. __le64 dest_qword;
  769. /* copy from the next struct field */
  770. from = src + ce_info->offset;
  771. /* prepare the bits and mask */
  772. shift_width = ce_info->lsb % 8;
  773. /* if the field width is exactly 64 on an x86 machine, then the shift
  774. * operation will not work because the SHL instructions count is masked
  775. * to 6 bits so the shift will do nothing
  776. */
  777. if (ce_info->width < 64)
  778. mask = BIT_ULL(ce_info->width) - 1;
  779. else
  780. mask = ~(u64)0;
  781. /* don't swizzle the bits until after the mask because the mask bits
  782. * will be in a different bit position on big endian machines
  783. */
  784. src_qword = *(u64 *)from;
  785. src_qword &= mask;
  786. /* shift to correct alignment */
  787. mask <<= shift_width;
  788. src_qword <<= shift_width;
  789. /* get the current bits from the target bit string */
  790. dest = hmc_bits + (ce_info->lsb / 8);
  791. memcpy(&dest_qword, dest, sizeof(dest_qword));
  792. dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
  793. dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
  794. /* put it all back */
  795. memcpy(dest, &dest_qword, sizeof(dest_qword));
  796. }
  797. /**
  798. * i40e_clear_hmc_context - zero out the HMC context bits
  799. * @hw: the hardware struct
  800. * @context_bytes: pointer to the context bit array (DMA memory)
  801. * @hmc_type: the type of HMC resource
  802. **/
  803. static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
  804. u8 *context_bytes,
  805. enum i40e_hmc_lan_rsrc_type hmc_type)
  806. {
  807. /* clean the bit array */
  808. memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
  809. return 0;
  810. }
  811. /**
  812. * i40e_set_hmc_context - replace HMC context bits
  813. * @context_bytes: pointer to the context bit array
  814. * @ce_info: a description of the struct to be filled
  815. * @dest: the struct to be filled
  816. **/
  817. static i40e_status i40e_set_hmc_context(u8 *context_bytes,
  818. struct i40e_context_ele *ce_info,
  819. u8 *dest)
  820. {
  821. int f;
  822. for (f = 0; ce_info[f].width != 0; f++) {
  823. /* we have to deal with each element of the HMC using the
  824. * correct size so that we are correct regardless of the
  825. * endianness of the machine
  826. */
  827. switch (ce_info[f].size_of) {
  828. case 1:
  829. i40e_write_byte(context_bytes, &ce_info[f], dest);
  830. break;
  831. case 2:
  832. i40e_write_word(context_bytes, &ce_info[f], dest);
  833. break;
  834. case 4:
  835. i40e_write_dword(context_bytes, &ce_info[f], dest);
  836. break;
  837. case 8:
  838. i40e_write_qword(context_bytes, &ce_info[f], dest);
  839. break;
  840. }
  841. }
  842. return 0;
  843. }
  844. /**
  845. * i40e_hmc_get_object_va - retrieves an object's virtual address
  846. * @hmc_info: pointer to i40e_hmc_info struct
  847. * @object_base: pointer to u64 to get the va
  848. * @rsrc_type: the hmc resource type
  849. * @obj_idx: hmc object index
  850. *
  851. * This function retrieves the object's virtual address from the object
  852. * base pointer. This function is used for LAN Queue contexts.
  853. **/
  854. static
  855. i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
  856. u8 **object_base,
  857. enum i40e_hmc_lan_rsrc_type rsrc_type,
  858. u32 obj_idx)
  859. {
  860. u32 obj_offset_in_sd, obj_offset_in_pd;
  861. i40e_status ret_code = 0;
  862. struct i40e_hmc_sd_entry *sd_entry;
  863. struct i40e_hmc_pd_entry *pd_entry;
  864. u32 pd_idx, pd_lmt, rel_pd_idx;
  865. u64 obj_offset_in_fpm;
  866. u32 sd_idx, sd_lmt;
  867. if (NULL == hmc_info) {
  868. ret_code = I40E_ERR_BAD_PTR;
  869. hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
  870. goto exit;
  871. }
  872. if (NULL == hmc_info->hmc_obj) {
  873. ret_code = I40E_ERR_BAD_PTR;
  874. hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
  875. goto exit;
  876. }
  877. if (NULL == object_base) {
  878. ret_code = I40E_ERR_BAD_PTR;
  879. hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
  880. goto exit;
  881. }
  882. if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
  883. ret_code = I40E_ERR_BAD_PTR;
  884. hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
  885. goto exit;
  886. }
  887. if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
  888. hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
  889. ret_code);
  890. ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
  891. goto exit;
  892. }
  893. /* find sd index and limit */
  894. I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
  895. &sd_idx, &sd_lmt);
  896. sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
  897. obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
  898. hmc_info->hmc_obj[rsrc_type].size * obj_idx;
  899. if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
  900. I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
  901. &pd_idx, &pd_lmt);
  902. rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
  903. pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
  904. obj_offset_in_pd = (u32)(obj_offset_in_fpm %
  905. I40E_HMC_PAGED_BP_SIZE);
  906. *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
  907. } else {
  908. obj_offset_in_sd = (u32)(obj_offset_in_fpm %
  909. I40E_HMC_DIRECT_BP_SIZE);
  910. *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
  911. }
  912. exit:
  913. return ret_code;
  914. }
  915. /**
  916. * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
  917. * @hw: the hardware struct
  918. * @queue: the queue we care about
  919. **/
  920. i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
  921. u16 queue)
  922. {
  923. i40e_status err;
  924. u8 *context_bytes;
  925. err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
  926. I40E_HMC_LAN_TX, queue);
  927. if (err < 0)
  928. return err;
  929. return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
  930. }
  931. /**
  932. * i40e_set_lan_tx_queue_context - set the HMC context for the queue
  933. * @hw: the hardware struct
  934. * @queue: the queue we care about
  935. * @s: the struct to be filled
  936. **/
  937. i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
  938. u16 queue,
  939. struct i40e_hmc_obj_txq *s)
  940. {
  941. i40e_status err;
  942. u8 *context_bytes;
  943. err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
  944. I40E_HMC_LAN_TX, queue);
  945. if (err < 0)
  946. return err;
  947. return i40e_set_hmc_context(context_bytes,
  948. i40e_hmc_txq_ce_info, (u8 *)s);
  949. }
  950. /**
  951. * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
  952. * @hw: the hardware struct
  953. * @queue: the queue we care about
  954. **/
  955. i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
  956. u16 queue)
  957. {
  958. i40e_status err;
  959. u8 *context_bytes;
  960. err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
  961. I40E_HMC_LAN_RX, queue);
  962. if (err < 0)
  963. return err;
  964. return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
  965. }
  966. /**
  967. * i40e_set_lan_rx_queue_context - set the HMC context for the queue
  968. * @hw: the hardware struct
  969. * @queue: the queue we care about
  970. * @s: the struct to be filled
  971. **/
  972. i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
  973. u16 queue,
  974. struct i40e_hmc_obj_rxq *s)
  975. {
  976. i40e_status err;
  977. u8 *context_bytes;
  978. err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
  979. I40E_HMC_LAN_RX, queue);
  980. if (err < 0)
  981. return err;
  982. return i40e_set_hmc_context(context_bytes,
  983. i40e_hmc_rxq_ce_info, (u8 *)s);
  984. }