i40e_virtchnl_pf.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
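
/* Worked example of the mapping above (illustrative numbers, not from the
 * original source): with a contiguous mapping and queue_mapping[0] == 64,
 * VSI-relative queue 3 resolves to PF queue 64 + 3 = 67; with the
 * noncontiguous flag set, the same lookup reads queue_mapping[3] directly.
 */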
/**
 * i40e_ctrl_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));

	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
/**
 * i40e_ctrl_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));

	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
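
/* Usage note for the two queue-control helpers above (see i40e_reset_vf()
 * and the enable/disable queue handlers below): callers first issue
 * ENABLE/DISABLE/FASTDISABLE to request a state change, wait briefly
 * (e.g. udelay(10)), then issue the matching *CHECK control, which reads
 * the QENA_STAT bit and returns -EPERM if the queue has not yet reached
 * the requested state.
 */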
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			  (pf->hw.func_caps.num_msix_vectors_vf *
			   vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
		vsi_queue_id =
		    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
	}

	tempmap = vecmap->txq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					     vsi_queue_id + 1);
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is the same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
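
/* Note on the encoding used above, assuming I40E_VIRTCHNL_SUPPORTED_QTYPES
 * is 2: linklistmap interleaves the two queue types, so bit (2 * q) marks
 * an RX entry for VSI queue q and bit (2 * q + 1) a TX entry for the same
 * queue. Walking the bitmap in ascending bit order therefore chains the
 * queues RX0, TX0, RX1, TX1, ... into the vector's linked list, with each
 * RQCTL/TQCTL write pointing at the next element (or END_OF_LIST).
 */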
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
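
/* Summary of the limits enforced above: header buffer at most 2048 - 64
 * bytes (split-header mode only), data buffer at most 16384 - 128 bytes,
 * and max packet size in [64, 16384). hbuff and dbuff are presumably
 * stored in the context in shifted (chunked) units, hence the
 * I40E_RXQ_CTX_*_SHIFT conversions.
 */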
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "LAN VSI index %d, VSI id %d\n",
			 vsi->idx, vsi->id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    0, true, false);
	}
	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

	/* accept bcast pkts. by default */
	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
		ret = -EINVAL;
	}

error_alloc_vsi_res:
	return ret;
}
/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
int i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, msix_vf;
	bool rsd = false;
	u16 pf_queue_id;
	int i, j;

	/* warn the VF */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* PF triggers VFR only when VF requests, in case of
	 * VFLR, HW triggers VFR
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 4; i++) {
		/* vf reset requires driver to first reset the
		 * vf and then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
			vf->vf_id);

	/* fast disable qps */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLE);
		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLE);
	}

	/* Queue enable/disable requires driver to
	 * first request the change and then poll the status register
	 * to make sure that the requested op was completed
	 * successfully
	 */
	udelay(10);
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
				 j, vf->lan_vsi_index, vf->vf_id);
		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
				 j, vf->lan_vsi_index, vf->vf_id);
	}

	/* clear the irq settings */
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < msix_vf; i++) {
		/* format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id)) +
						     (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id)) +
						      (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* set the defaults for the rqctl & tqctl registers */
	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
	}

	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	i40e_flush(hw);

	return ret;
}
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
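
/* Example of the VSILAN_QTABLE packing above (illustrative values): each
 * 32-bit register holds two PF queue ids, the even queue in bits 15:0 and
 * the odd queue in bits 31:16. If queues 2*j and 2*j + 1 map to PF queues
 * 64 and 65, the value written is 0x00410040.
 */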
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}

	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_VF_DEVICE_ID,
				       vfdev);
	}

	return false;
}
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);

	/* free up vf resources */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;

	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"pci_enable_sriov failed with error %d!\n", ret);
		pf->num_alloc_vfs = 0;
		goto err_iov;
	}

	/* allocate memory */
	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);

		ret = i40e_alloc_vf_res(&vfs[i]);
		if (ret)
			break;
		i40e_reset_vf(&vfs[i], true);

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	return ret;
}
#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
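
/* Typical trigger for the handler above, via the standard PCI sysfs
 * attribute (path shown for illustration):
 *
 *   echo 4 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs   # allocate 4 VFs
 *   echo 0 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs   # free them again
 *
 * A nonzero count routes to i40e_pci_sriov_enable(); zero frees all VFs.
 */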
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct i40e_virtchnl_version_info));
}
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
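
/* The reply built above is a variable-length buffer: one
 * i40e_virtchnl_vf_resource header followed by num_vsis
 * i40e_virtchnl_vsi_resource entries (always one here, for the LAN VSI).
 * On allocation failure, len is reset to 0 so the error response carries
 * no payload.
 */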
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		return -ENOENT;

	return i40e_reset_vf(vf, false);
}
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		tempmap = map->txq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       aq_ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
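	/* I40E_VLAN_ANY removes the address from every VLAN it was added on */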
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       aq_ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
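
	/* guest VLAN filters need stripping enabled on the VSI first */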
	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtchnl operation code
 * @v_retval: return value carried in the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;
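
	/* Each opcode has a fixed base length; opcodes that carry a list
	 * add num_elements * element size on top, so the expected length
	 * can only be computed after the fixed header has been read.
	 */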
	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}

	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	}

	return 0;
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtchnl operation code
 * @v_retval: return value carried in the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_vf *vf = &(pf->vf[vf_id]);
	struct i40e_hw *hw = &pf->hw;
	int ret;

	pf->vf_aq_requests++;
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
	if (ret) {
		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
		return ret;
	}
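
	/* a valid message means the VF is out of reset; reflect that in its
	 * reset status register
	 */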
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ret = i40e_vc_reset_vf_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
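		/* GLGEN_VFLRSTAT is an array of 32-bit registers with one
		 * bit per VF, so index by the absolute VF number
		 */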
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (i40e_reset_vf(vf, true))
				dev_err(&pf->pdev->dev,
					"Unable to reset the VF %d\n", vf_id);
			/* free up vf resources to destroy vsi state */
			i40e_free_vf_res(vf);

			/* allocate new vf resources with the default state */
			if (i40e_alloc_vf_res(vf))
				dev_err(&pf->pdev->dev,
					"Unable to allocate VF resources %d\n",
					vf_id);

			i40e_enable_vf_mappings(vf);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status =
	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan identifier
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}
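
	/* an existing port VLAN must be removed before a new one can be
	 * programmed
	 */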
	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
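
	/* a nonzero vlan or qos programs a new pvid; zero for both clears
	 * the port VLAN and turns stripping back off
	 */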
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
					vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vlan_stripping_disable(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	ret = 0;

error_pvid:
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
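	/* per-VF TX rate limiting is not implemented yet */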
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	/* first entry of the list is the default ethernet address */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
		break;
	}
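
	/* tx_rate reads back as 0 because i40e_ndo_set_vf_bw is not
	 * implemented
	 */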
	ivi->tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}
  2055. }