ice_virtchnl_pf.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum ice_status v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_get_vf_vector - get VF interrupt vector register offset
 * @vf_msix: number of MSIx vectors per VF on a PF
 * @vf_id: VF identifier
 * @i: index of MSIx vector
 */
static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
{
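	/* The first vector (the VF's OICR vector) is indexed by VF ID alone;
	 * the remaining data vectors are packed (vf_msix - 1) per VF, so VF
	 * vf_id's ith data vector lives at (vf_msix - 1) * vf_id + (i - 1).
	 */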
	return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
		VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, pf_vf_msix;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	pf_vf_msix = pf->num_vf_msix;
	/* Disable interrupts so that VF starts in a known state */
	for (i = 0; i < pf_vf_msix; i++) {
		u32 reg_idx;

		reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
		wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
		ice_flush(&pf->hw);
	}

	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/***********************enable_vf routines*****************************/

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_vf_msix - 1;

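	/* Hand each of the VF's vectors back to the PF: clearing the VF
	 * number and setting the IS_PF bit in GLINT_VECT2FUNC reassigns the
	 * vector to this PF.
	 */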
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Avoid wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
			continue;

		/* stop rings without wait time */
		ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
				      ICE_NO_RESET, i);
		ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);

		clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	tmp = pf->num_alloc_vfs;
	pf->num_vf_qps = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings */
			ice_dis_vf_mappings(&pf->vf[i]);

			/* Set this state so that assigned VF vectors can be
			 * reclaimed by PF for reuse in ice_vsi_release(). No
			 * need to clear this bit since pf->vf array is being
			 * freed anyway after this for loop
			 */
			set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	devm_kfree(&pf->pdev->dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

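			/* GLGEN_VFLRSTAT packs 32 VFs per 32-bit register;
			 * locate this VF's register and bit from its
			 * device-absolute VF ID (vf_base_id + vf_id).
			 */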
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	struct ice_hw *hw;
	int vf_abs_id, i;

	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in ice_free_vf_res(), but it's safer to do
	 * it earlier to give any VF configuration functions that may still be
	 * running at this point some time to finish.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

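	/* Poll the VF's PCI Device Status register through the PF_PCI_CIAA /
	 * PF_PCI_CIAD indirect config-space access pair and warn if the VF
	 * still has PCI transactions pending after the reset was triggered.
	 */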
	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if (!(reg & VF_TRANS_PENDING_M))
			break;
		udelay(1);
	}
	if (reg & VF_TRANS_PENDING_M)
		dev_err(&pf->pdev->dev,
			"VF %d PCI transactions stuck\n", vf->vf_id);
}

/**
 * ice_vsi_set_pvid - Set port VLAN ID for the VSI
 * @vsi: the VSI being changed
 * @vid: the VLAN ID to set as a PVID
 */
static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx ctxt = { 0 };
	enum ice_status status;

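	/* Program classic port-VLAN behavior: the VSI operates in tagged
	 * mode, hardware inserts the PVID on transmit
	 * (ICE_AQ_VSI_PVLAN_INSERT_PVID) and strips the tag on receive
	 * (ICE_AQ_VSI_VLAN_EMOD_STR).
	 */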
	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
			       ICE_AQ_VSI_PVLAN_INSERT_PVID |
			       ICE_AQ_VSI_VLAN_EMOD_STR;
	ctxt.info.pvid = cpu_to_le16(vid);
	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (status) {
		dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
			 status, hw->adminq.sq_last_status);
		return -EIO;
	}

	vsi->info.pvid = ctxt.info.pvid;
	vsi->info.vlan_flags = ctxt.info.vlan_flags;
	return 0;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_alloc_vsi_res - Setup VF VSI and its resources
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	/* first vector index is the VF's OICR index */
	vf->first_vector_idx = vsi->hw_base_vector;
	/* Since hw_base_vector holds the vector where data queue interrupts
	 * start, increment by 1 since VFs allocated vectors include OICR intr
	 * as well.
	 */
	vsi->hw_base_vector += 1;

	/* Check if a port VLAN existed before, and restore it accordingly */
	if (vf->port_vlan_id)
		ice_vsi_set_pvid(vsi, vf->port_vlan_id);

	eth_broadcast_addr(broadcast);

	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto ice_alloc_vsi_res_exit;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_add_mac_to_list(vsi, &tmp_add_list,
					     vf->dflt_lan_addr.addr);
		if (status)
			goto ice_alloc_vsi_res_exit;
	}

	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status)
		dev_err(&pf->pdev->dev, "could not add MAC filters\n");

	/* Clear this bit after VF initialization since we shouldn't reclaim
	 * and reassign interrupts for synchronous or asynchronous VFR events.
	 * We don't want to reconfigure interrupts since AVF driver doesn't
	 * expect vector assignment to be changed unless there is a request for
	 * more vectors.
	 */
	clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
ice_alloc_vsi_res_exit:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;
}

/**
 * ice_alloc_vf_res - Allocate VF resources
 * @vf: pointer to the VF structure
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	int status;

	/* setup VF VSI and necessary resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* VF is now completely initialized */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}

/**
 * ice_ena_vf_mappings
 * @vf: pointer to the VF structure
 *
 * Enable VF vectors and queues allocation by writing the details into
 * respective registers.
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int first, last, v;
	struct ice_hw *hw;
	int abs_vf_id;
	u32 reg;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	first = vf->first_vector_idx;
	last = (first + pf->num_vf_msix) - 1;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* VF Vector allocation */
	reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
	       ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
	       VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = first; v <= last; v++) {
		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
		     VPLAN_TXQ_MAPENA_TX_ENA_M);
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
		     VPLAN_RXQ_MAPENA_RX_ENA_M);
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and round up, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
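	/* For example, with 8 VFs, avail_res = 48, max_res = 65 and
	 * min_res = 5, the loop tries 65, 33, 17, 9 and 5 per VF; only
	 * 8 * 5 = 40 <= 48 fits, so 5 is returned.
	 */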
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

/**
 * ice_check_avail_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * This function is where we calculate actual number of resources for VF VSIs,
 * we don't reserve ahead of time during probe. Returns success if vectors and
 * queues resources are available, otherwise returns error code
 */
static int ice_check_avail_res(struct ice_pf *pf)
{
	u16 num_msix, num_txq, num_rxq;

	if (!pf->num_alloc_vfs)
		return -EINVAL;

	/* Grab from HW interrupts common pool
	 * Note: By the time the user decides it needs more vectors in a VF
	 * it's already too late since one must decide this prior to creating
	 * the VF interface. So the best we can do is take a guess as to what
	 * the user might want.
	 *
	 * We have two policies for vector allocation:
	 * 1. if num_alloc_vfs is from 1 to 16, then we consider this as a
	 * small number of NFV VFs used for NFV appliances; since this is a
	 * special case, we try to assign the maximum vectors per VF (65) as
	 * much as possible, based on the determine_resources algorithm.
	 * 2. if num_alloc_vfs is from 17 to 256, then it's a large number of
	 * regular VFs which are not used for any special purpose. Hence try to
	 * grab default interrupt vectors (5 as supported by AVF driver).
	 */
	if (pf->num_alloc_vfs <= 16) {
		num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
					     ICE_MAX_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
		num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
					     ICE_DFLT_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else {
		dev_err(&pf->pdev->dev,
			"Number of VFs %d exceeds max VF count %d\n",
			pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
		return -EIO;
	}

	if (!num_msix)
		return -EIO;

	/* Grab from the common pool
	 * start by requesting Default queues (4 as supported by AVF driver),
	 * Note that the main difference between queues and vectors is that
	 * the latter can only be reserved at init time, while queues can be
	 * requested by a VF at runtime through virtchnl; that is the reason
	 * we start by reserving only a few queues.
	 */
	num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
				    ICE_MIN_QS_PER_VF);
	num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq)
		return -EIO;

	/* since the AVF driver works only with queue pairs, it expects an
	 * equal number of Rx and Tx queues; so take the minimum of the
	 * available Tx and Rx queues
	 */
	pf->num_vf_qps = min_t(int, num_txq, num_rxq);
	pf->num_vf_msix = num_msix;

	return 0;
}

/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;

	/* PF software completes the flow by notifying VF that reset flow is
	 * completed. This is done by clearing the reset bit in the
	 * VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT register
	 * to VFR completed (done at the end of this function).
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in ice_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct ice_hw *hw = &pf->hw;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr);

	/* Call Disable LAN Tx queue AQ call with VFR bit set and 0
	 * queues to inform Firmware about VF reset.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
				ICE_VF_RESET, v, NULL);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			struct ice_vf *vf = &pf->vf[v];
			u32 reg;

			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
	usleep_range(10000, 20000);

	/* free VF resources to begin resetting the VSI state */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_free_vf_res(&pf->vf[v]);

	if (ice_check_avail_res(pf)) {
		dev_err(&pf->pdev->dev,
			"Cannot allocate VF resources, try with fewer VFs\n");
		return false;
	}

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_cleanup_and_realloc_vf(&pf->vf[v]);

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is reset, false otherwise.
 */
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	ice_trigger_vf_reset(vf, is_vflr);

	if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
		ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
				      vf->vf_id);
		ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
		clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
	} else {
		/* Call Disable LAN Tx queue AQ call even when queues are not
		 * enabled. This is needed for successful completion of VFR
		 */
		ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
				NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}
	}

	/* Display a warning if VF didn't manage to reset in time, but
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			 vf->vf_id);
	usleep_range(10000, 20000);

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);
	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_alloc_vfs - Allocate and set up VFs resources
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vfs;
	int i, ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}

	/* allocate memory */
	vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
			   GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_unroll_sriov;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].vf_sw_id = pf->first_sw;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		/* Set this state so that PF driver does VF vector assignment */
		set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	if (!ice_reset_all_vfs(pf, false)) {
		ret = -EIO;
		goto err_unroll_sriov;
	}

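	/* Success: jump past the unwind code below just to rearm global
	 * interrupt 0, which both the success and error paths need.
	 */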
	goto err_unroll_intr;

err_unroll_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

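	/* Build a mask of the low "nominal check" state bits (e.g. resets
	 * pending or in progress); if any of them is set in pf->state, the
	 * PF is busy and SR-IOV setup should be refused.
	 */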
	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = &pf->pdev->dev;
	int err;

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -ENODEV;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -ENOTSUPP;
	}

	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return num_vfs;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * This function is called when the user updates the number of VFs in sysfs.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return ice_pci_sriov_ena(pf, num_vfs);

	if (!pci_vfs_assigned(pdev)) {
		ice_free_vfs(pf);
	} else {
		dev_err(&pf->pdev->dev,
			"can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int vf_id;
	u32 reg;

	if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !pf->num_alloc_vfs)
		return;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, PFINT_OICR_ENA);
	reg |= PFINT_OICR_VFLR_M;
	wr32(hw, PFINT_OICR_ENA, reg);
	ice_flush(hw);

	clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out which VFs have
		 * been reset
		 */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
	}
}