i40e_virtchnl_pf.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"
/***********************misc routines*****************************/

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
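
	/* A VSI's queue map comes in two flavors: noncontiguous, where
	 * queue_mapping[] carries one PF queue id per VSI queue, and
	 * contiguous, where queue_mapping[0] is the first PF queue and
	 * the rest follow linearly.
	 */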
	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			      (pf->hw.func_caps.num_msix_vectors_vf
			       * vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
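
	/* Fold the rx and tx queue maps into one bitmap in queue order:
	 * each VSI queue pair takes I40E_VIRTCHNL_SUPPORTED_QTYPES (two)
	 * adjacent bits, rx in the even bit and tx in the odd one, so the
	 * link list below visits the queues in ascending order.
	 */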
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);
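
	/* Now walk the rest of the bitmap, pointing each queue's interrupt
	 * cause control register at the next queue in the list; the final
	 * entry is terminated with I40E_QUEUE_END_OF_LIST.
	 */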
	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is the same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
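	/* the HMC context takes the ring base address in 128-byte units */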
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
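		/* hbuff is stored in 2^I40E_RXQ_CTX_HBUFF_SHIFT byte
		 * units, hence the shift
		 */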
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "VF %d assigned LAN VSI index %d, VSI id %d\n",
			 vf->vf_id, vsi->idx, vsi->id);
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id, true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

error_alloc_vsi_res:
	return ret;
}
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
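	/* Each VSILAN_QTABLE register packs two queue indexes, one in
	 * each 16-bit half; 0x07FF in a half marks that slot unused.
	 */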
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
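	/* the +1 covers vector 0, which has its own DYN_CTL0/LNKLST0
	 * registers and is handled separately in the loops below
	 */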
	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						    + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20

/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the vf structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
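
	/* PF_PCI_CIAA/CIAD form an indirect window into VF PCI config
	 * space: write the config address (the device status register,
	 * tagged with the absolute VF number) to CIAA, then poll CIAD
	 * for the transactions-pending bit to clear.
	 */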
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf and then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
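	/* brief pause to let the just-freed resources quiesce before
	 * they are reallocated
	 */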
	mdelay(10);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}
/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_DEV_ID_VF,
				       vfdev);
	}

	return false;
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;

	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
#endif
/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;

	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	mdelay(10); /* let any messages in transit get finished up */
	/* free up vf resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	if (!i40e_vfs_are_assigned(pf)) {
		pci_disable_sriov(pf->pdev);
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
		}
		i40e_disable_pf_switch_lb(pf);
	} else {
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");
	}

	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"pci_enable_sriov failed with error %d!\n", ret);
		pf->num_alloc_vfs = 0;
		goto err_iov;
	}

	/* allocate memory */
	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);

		/* vf resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	i40e_enable_pf_switch_lb(pf);
err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}
#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
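	/* the admin queue addresses VFs by absolute (device-wide) id,
	 * so offset the PF-relative vf_id by vf_base_id
	 */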
	int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself; unlike other virtchnl messages,
 * the pf driver doesn't send a response back to the vf
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* look out for an invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}
/**
 * i40e_check_vf_permission
 * @vf: pointer to the vf info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
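
	/* as in i40e_vc_add_vlan_msg, guest VLAN changes are blocked
	 * while a PF-set port VLAN is active
	 */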
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtchnl operation code
 * @v_retval: message return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
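	/* each opcode carries a fixed-size payload; for the list-style
	 * messages the fixed header is checked first, then valid_len is
	 * extended by num_elements * element size and must match msglen
	 * exactly
	 */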
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}

	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	}

	return 0;
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtchnl operation code
 * @v_retval: message return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
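	/* the admin queue reports an absolute VF id; subtract this
	 * function's vf_base_id to get the index into pf->vf[]
	 */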
	int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);

	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
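		/* each GLGEN_VFLRSTAT register holds status bits for 32
		 * VFs, so split the absolute VF id into a register index
		 * and a bit position
		 */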
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
			i40e_reset_vf(vf, true);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status =
	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
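	/* unlike the broadcast notifiers above, this sends to a single
	 * VF's mailbox
	 */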
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
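	/* pf_set_mac records that the address was assigned by the host
	 * administrator rather than chosen by the VF itself
	 */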
	vf->pf_set_mac = true;
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan id to be set on the vf
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if (!(vlan_id || qos) && vsi->info.pvid)
		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
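
	/* the hardware pvid field carries both settings: the VLAN id in
	 * the low bits and the 802.1p priority above
	 * I40E_VLAN_PRIORITY_SHIFT
	 */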
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vsi_remove_pvid(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}

		/* Kill non-vlan MAC filters - ignore error return since
		 * there might not be any non-vlan MAC filters.
		 */
		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;
	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
	ivi->tx_rate = 0;
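	/* unpack the stored pvid into the separate vlan and qos fields
	 * expected by rtnetlink
	 */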
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}