ixgbe_sriov.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
                                           unsigned int num_vfs)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct vf_macvlans *mv_list;
    int num_vf_macvlans, i;

    num_vf_macvlans = hw->mac.num_rar_entries -
                      (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
    if (!num_vf_macvlans)
        return;

    mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
                      GFP_KERNEL);
    if (mv_list) {
        /* Initialize list of VF macvlans */
        INIT_LIST_HEAD(&adapter->vf_mvs.l);
        for (i = 0; i < num_vf_macvlans; i++) {
            mv_list[i].vf = -1;
            mv_list[i].free = true;
            list_add(&mv_list[i].l, &adapter->vf_mvs.l);
        }
        adapter->mv_list = mv_list;
    }
}
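
/**
 * __ixgbe_enable_sriov - Allocate driver state for SR-IOV operation
 * @adapter: Pointer to adapter struct
 * @num_vfs: Number of VFs being enabled
 *
 * Sets the SR-IOV/VMDq flags, allocates the per-VF control structures,
 * selects VEB bridging, scales the traffic-class limits to the VF count
 * and disables RSC, which cannot be used in SR-IOV mode.
 */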
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                                unsigned int num_vfs)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int i;

    /* Enable VMDq flag so device will be set in VM mode */
    adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
                      IXGBE_FLAG_VMDQ_ENABLED;

    /* Allocate memory for per VF control structures */
    adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
                              GFP_KERNEL);
    if (!adapter->vfinfo)
        return -ENOMEM;

    adapter->num_vfs = num_vfs;

    ixgbe_alloc_vf_macvlans(adapter, num_vfs);
    adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;

    /* Initialize default switching mode VEB */
    IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
    adapter->bridge_mode = BRIDGE_MODE_VEB;

    /* limit traffic classes based on VFs enabled */
    if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
        adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
        adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
    } else if (num_vfs < 32) {
        adapter->dcb_cfg.num_tcs.pg_tcs = 4;
        adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
    } else {
        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
    }

    /* Disable RSC when in SR-IOV mode */
    adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                         IXGBE_FLAG2_RSC_ENABLED);

    for (i = 0; i < num_vfs; i++) {
        /* enable spoof checking for all VFs */
        adapter->vfinfo[i].spoofchk_enabled = true;

        /* We support VF RSS querying only for 82599 and x540
         * devices at the moment. These devices share RSS
         * indirection table and RSS hash key with PF therefore
         * we want to disable the querying by default.
         */
        adapter->vfinfo[i].rss_query_enabled = 0;

        /* Untrust all VFs */
        adapter->vfinfo[i].trusted = false;

        /* set the default xcast mode */
        adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
    }

    e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
    return 0;
}
/**
 * ixgbe_get_vfs - Find and take references to all vf devices
 * @adapter: Pointer to adapter struct
 */
static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    u16 vendor = pdev->vendor;
    struct pci_dev *vfdev;
    int vf = 0;
    u16 vf_id;
    int pos;

    pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
    if (!pos)
        return;
    pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

    vfdev = pci_get_device(vendor, vf_id, NULL);
    for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
        if (!vfdev->is_virtfn)
            continue;
        if (vfdev->physfn != pdev)
            continue;
        if (vf >= adapter->num_vfs)
            continue;
        pci_dev_get(vfdev);
        adapter->vfinfo[vf].vfdev = vfdev;
        ++vf;
    }
}

/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
    int pre_existing_vfs = 0;
    unsigned int num_vfs;

    pre_existing_vfs = pci_num_vf(adapter->pdev);
    if (!pre_existing_vfs && !max_vfs)
        return;

    /* If there are pre-existing VFs then we have to force
     * use of that many - override any module parameter value.
     * This may result from the user unloading the PF driver
     * while VFs were assigned to guest VMs or because the VFs
     * have been created via the new PCI SR-IOV sysfs interface.
     */
    if (pre_existing_vfs) {
        num_vfs = pre_existing_vfs;
        dev_warn(&adapter->pdev->dev,
                 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
    } else {
        int err;
        /*
         * The 82599 supports up to 64 VFs per physical function
         * but this implementation limits allocation to 63 so that
         * basic networking resources are still available to the
         * physical function. If the user requests greater than
         * 63 VFs then it is an error - reset to default of zero.
         */
        num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);

        err = pci_enable_sriov(adapter->pdev, num_vfs);
        if (err) {
            e_err(probe, "Failed to enable PCI sriov: %d\n", err);
            return;
        }
    }

    if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
        ixgbe_get_vfs(adapter);
        return;
    }

    /* If we have gotten to this point then there is no memory available
     * to manage the VF devices - print message and bail.
     */
    e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n");
    ixgbe_disable_sriov(adapter);
}
#endif /* #ifdef CONFIG_PCI_IOV */
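
/**
 * ixgbe_disable_sriov - Release VF resources and return to non-IOV mode
 * @adapter: Pointer to adapter struct
 *
 * Drops the references taken on the VF pci_dev structures, frees the
 * per-VF state and, provided no VFs are still assigned to guests,
 * disables SR-IOV on the device and restores the RSS configuration.
 */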
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
    unsigned int num_vfs = adapter->num_vfs, vf;
    int rss;

    /* set num VFs to 0 to prevent access to vfinfo */
    adapter->num_vfs = 0;

    /* put the reference to all of the vf devices */
    for (vf = 0; vf < num_vfs; ++vf) {
        struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

        if (!vfdev)
            continue;
        adapter->vfinfo[vf].vfdev = NULL;
        pci_dev_put(vfdev);
    }

    /* free VF control structures */
    kfree(adapter->vfinfo);
    adapter->vfinfo = NULL;

    /* free macvlan list */
    kfree(adapter->mv_list);
    adapter->mv_list = NULL;

    /* if SR-IOV is already disabled then there is nothing to do */
    if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
        return 0;

#ifdef CONFIG_PCI_IOV
    /*
     * If our VFs are assigned we cannot shut down SR-IOV
     * without causing issues, so just leave the hardware
     * available but disabled
     */
    if (pci_vfs_assigned(adapter->pdev)) {
        e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
        return -EPERM;
    }
    /* disable iov and allow time for transactions to clear */
    pci_disable_sriov(adapter->pdev);
#endif

    /* Disable VMDq flag so device will be set in VM mode */
    if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
        adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
        rss = min_t(int, ixgbe_max_rss_indices(adapter),
                    num_online_cpus());
    } else {
        rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
    }

    adapter->ring_feature[RING_F_VMDQ].offset = 0;
    adapter->ring_feature[RING_F_RSS].limit = rss;

    /* take a breather then clean up driver data */
    msleep(100);
    return 0;
}
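
/**
 * ixgbe_pci_sriov_enable - Enable SR-IOV via the sysfs interface
 * @dev: PCI device
 * @num_vfs: Number of VFs requested
 *
 * Validates the request against the traffic-class and macvlan limits,
 * allocates driver state, reinitializes the adapter and then enables
 * SR-IOV at the PCI level. Returns the number of VFs enabled or a
 * negative error code.
 */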
static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
    struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
    int pre_existing_vfs = pci_num_vf(dev);
    int err = 0, num_rx_pools, i, limit;
    u8 num_tc;

    if (pre_existing_vfs && pre_existing_vfs != num_vfs)
        err = ixgbe_disable_sriov(adapter);
    else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
        return num_vfs;

    if (err)
        return err;

    /* While the SR-IOV capability structure reports total VFs to be 64,
     * we limit the actual number allocated as below based on two factors.
     *    Num_TCs   MAX_VFs
     *    1         63
     *    <=4       31
     *    >4        15
     * First, we reserve some transmit/receive resources for the PF.
     * Second, VMDQ also uses the same pools that SR-IOV does. We need to
     * account for this, so that we don't accidentally allocate more VFs
     * than we have available pools. The PCI bus driver already checks for
     * other values out of range.
     */
    num_tc = adapter->hw_tcs;
    num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
                                 adapter->num_rx_pools);
    limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
            (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;

    if (num_vfs > (limit - num_rx_pools)) {
        e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
                  num_tc, num_rx_pools - 1, limit - num_rx_pools);
        return -EPERM;
    }

    err = __ixgbe_enable_sriov(adapter, num_vfs);
    if (err)
        return err;

    for (i = 0; i < num_vfs; i++)
        ixgbe_vf_configuration(dev, (i | 0x10000000));

    /* reset before enabling SRIOV to avoid mailbox issues */
    ixgbe_sriov_reinit(adapter);

    err = pci_enable_sriov(dev, num_vfs);
    if (err) {
        e_dev_warn("Failed to enable PCI sriov: %d\n", err);
        return err;
    }
    ixgbe_get_vfs(adapter);

    return num_vfs;
#else
    return 0;
#endif
}

static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
    struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
    int err;
#ifdef CONFIG_PCI_IOV
    u32 current_flags = adapter->flags;
    int prev_num_vf = pci_num_vf(dev);
#endif

    err = ixgbe_disable_sriov(adapter);

    /* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
    if (!err && (current_flags != adapter->flags ||
                 prev_num_vf != pci_num_vf(dev)))
        ixgbe_sriov_reinit(adapter);
#endif

    return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
    if (num_vfs == 0)
        return ixgbe_pci_sriov_disable(dev);
    else
        return ixgbe_pci_sriov_enable(dev, num_vfs);
}
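
/**
 * ixgbe_set_vf_multicasts - Program a VF's multicast filters
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message from the VF; word 0 carries the entry count,
 *          the following words carry the multicast hash values
 * @vf: VF index
 */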
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                                   u32 *msgbuf, u32 vf)
{
    int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                  >> IXGBE_VT_MSGINFO_SHIFT;
    u16 *hash_list = (u16 *)&msgbuf[1];
    struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
    struct ixgbe_hw *hw = &adapter->hw;
    int i;
    u32 vector_bit;
    u32 vector_reg;
    u32 mta_reg;
    u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

    /* only so many hash values supported */
    entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

    /*
     * salt away the number of multicast addresses assigned
     * to this VF for later use to restore when the PF multicast
     * list changes
     */
    vfinfo->num_vf_mc_hashes = entries;

    /*
     * VFs are limited to using the MTA hash table for their multicast
     * addresses
     */
    for (i = 0; i < entries; i++)
        vfinfo->vf_mc_hashes[i] = hash_list[i];

    for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
        vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
        vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
        mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
        mta_reg |= BIT(vector_bit);
        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
    }
    vmolr |= IXGBE_VMOLR_ROMPE;
    IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

    return 0;
}
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct vf_data_storage *vfinfo;
    int i, j;
    u32 vector_bit;
    u32 vector_reg;
    u32 mta_reg;

    for (i = 0; i < adapter->num_vfs; i++) {
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
        vfinfo = &adapter->vfinfo[i];
        for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
            hw->addr_ctrl.mta_in_use++;
            vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
            vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
            mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
            mta_reg |= BIT(vector_bit);
            IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }

        if (vfinfo->num_vf_mc_hashes)
            vmolr |= IXGBE_VMOLR_ROMPE;
        else
            vmolr &= ~IXGBE_VMOLR_ROMPE;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
    }

    /* Restore any VF macvlans */
    ixgbe_full_sync_mac_table(adapter);
}
#endif
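
/**
 * ixgbe_set_vf_vlan - Add or remove a VLAN filter on behalf of a VF
 * @adapter: Pointer to adapter struct
 * @add: True to add the filter, false to remove it
 * @vid: VLAN ID
 * @vf: VF index (pool number)
 */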
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int err;

    /* If VLAN overlaps with one the PF is currently monitoring make
     * sure that we are able to allocate a VLVF entry. This may be
     * redundant but it guarantees PF will maintain visibility to
     * the VLAN.
     */
    if (add && test_bit(vid, adapter->active_vlans)) {
        err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
        if (err)
            return err;
    }

    err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

    if (add && !err)
        return err;

    /* If we failed to add the VF VLAN or we are removing the VF VLAN
     * we may need to drop the PF pool bit in order to allow us to free
     * up the VLVF resources.
     */
    if (test_bit(vid, adapter->active_vlans) ||
        (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
        ixgbe_update_pf_promisc_vlvf(adapter, vid);

    return err;
}
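
/**
 * ixgbe_set_vf_lpe - Handle a VF request to change its maximum frame size
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message; word 1 carries the requested max frame size
 * @vf: VF index
 *
 * On 82599 the VF Rx path is disabled if honoring the request would mix
 * jumbo and non-jumbo frames between the PF and legacy VFs.
 */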
static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int max_frame = msgbuf[1];
    u32 max_frs;

    /*
     * For 82599EB we have to keep all PFs and VFs operating with
     * the same max_frame value in order to avoid sending an oversize
     * frame to a VF. In order to guarantee this is handled correctly
     * for all cases we have several special exceptions to take into
     * account before we can enable the VF for receive
     */
    if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
        struct net_device *dev = adapter->netdev;
        int pf_max_frame = dev->mtu + ETH_HLEN;
        u32 reg_offset, vf_shift, vfre;
        s32 err = 0;

#ifdef CONFIG_FCOE
        if (dev->features & NETIF_F_FCOE_MTU)
            pf_max_frame = max_t(int, pf_max_frame,
                                 IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
        switch (adapter->vfinfo[vf].vf_api) {
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
            /* Version 1.1 supports jumbo frames on VFs if PF has
             * jumbo frames enabled which means legacy VFs are
             * disabled
             */
            if (pf_max_frame > ETH_FRAME_LEN)
                break;
            /* fall through */
        default:
            /* If the PF or VF are running w/ jumbo frames enabled
             * we need to shut down the VF Rx path as we cannot
             * support jumbo frames on legacy VFs
             */
            if ((pf_max_frame > ETH_FRAME_LEN) ||
                (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
                err = -EINVAL;
            break;
        }

        /* determine VF receive enable location */
        vf_shift = vf % 32;
        reg_offset = vf / 32;

        /* enable or disable receive depending on error */
        vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
        if (err)
            vfre &= ~BIT(vf_shift);
        else
            vfre |= BIT(vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

        if (err) {
            e_err(drv, "VF max_frame %d out of range\n", max_frame);
            return err;
        }
    }

    /* MTU < 68 is an error and causes problems on some kernels */
    if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
        e_err(drv, "VF max_frame %d out of range\n", max_frame);
        return -EINVAL;
    }

    /* pull current max frame size from hardware */
    max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
    max_frs &= IXGBE_MHADD_MFS_MASK;
    max_frs >>= IXGBE_MHADD_MFS_SHIFT;

    if (max_frs < max_frame) {
        max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
    }

    e_info(hw, "VF requests change max MTU to %d\n", max_frame);

    return 0;
}
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
    u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
    vmolr |= IXGBE_VMOLR_BAM;
    if (aupe)
        vmolr |= IXGBE_VMOLR_AUPE;
    else
        vmolr &= ~IXGBE_VMOLR_AUPE;
    IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;

    IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}
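
/**
 * ixgbe_clear_vf_vlans - Remove a VF from every VLVF/VLVFB entry
 * @adapter: Pointer to adapter struct
 * @vf: VF index
 *
 * Walks the VLAN pool filter table, clears this VF's pool bit and
 * releases VLVF entries (and VFTA bits) that no other pool still uses.
 */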
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 vlvfb_mask, pool_mask, i;

    /* create mask for VF and other pools */
    pool_mask = ~BIT(VMDQ_P(0) % 32);
    vlvfb_mask = BIT(vf % 32);

    /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
    for (i = IXGBE_VLVF_ENTRIES; i--;) {
        u32 bits[2], vlvfb, vid, vfta, vlvf;
        u32 word = i * 2 + vf / 32;
        u32 mask;

        vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

        /* if our bit isn't set we can skip it */
        if (!(vlvfb & vlvfb_mask))
            continue;

        /* clear our bit from vlvfb */
        vlvfb ^= vlvfb_mask;

        /* create 64b mask to check whether we should clear VLVF */
        bits[word % 2] = vlvfb;
        bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

        /* if other pools are present, just remove ourselves */
        if (bits[(VMDQ_P(0) / 32) ^ 1] ||
            (bits[VMDQ_P(0) / 32] & pool_mask))
            goto update_vlvfb;

        /* if PF is present, leave VFTA */
        if (bits[0] || bits[1])
            goto update_vlvf;

        /* if we cannot determine VLAN just remove ourselves */
        vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
        if (!vlvf)
            goto update_vlvfb;

        vid = vlvf & VLAN_VID_MASK;
        mask = BIT(vid % 32);

        /* clear bit from VFTA */
        vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
        if (vfta & mask)
            IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
        /* clear POOL selection enable */
        IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

        if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
            vlvfb = 0;
update_vlvfb:
        /* clear pool bits */
        IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
    }
}
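
/**
 * ixgbe_set_vf_macvlan - Manage the macvlan filters owned by a VF
 * @adapter: Pointer to adapter struct
 * @vf: VF index
 * @index: 0 clears all filters for the VF, nonzero adds a filter
 * @mac_addr: MAC address of the filter being added
 */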
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
{
    struct vf_macvlans *entry;
    struct list_head *pos;
    int retval = 0;

    if (index <= 1) {
        list_for_each(pos, &adapter->vf_mvs.l) {
            entry = list_entry(pos, struct vf_macvlans, l);
            if (entry->vf == vf) {
                entry->vf = -1;
                entry->free = true;
                entry->is_macvlan = false;
                ixgbe_del_mac_filter(adapter,
                                     entry->vf_macvlan, vf);
            }
        }
    }

    /*
     * If index was zero then we were asked to clear the uc list
     * for the VF. We're done.
     */
    if (!index)
        return 0;

    entry = NULL;

    list_for_each(pos, &adapter->vf_mvs.l) {
        entry = list_entry(pos, struct vf_macvlans, l);
        if (entry->free)
            break;
    }

    /*
     * If we traversed the entire list and didn't find a free entry
     * then we're out of space on the RAR table. Also entry may
     * be NULL because the original memory allocation for the list
     * failed, which is not fatal but does mean we can't support
     * VF requests for MACVLAN because we couldn't allocate
     * memory for the list management required.
     */
    if (!entry || !entry->free)
        return -ENOSPC;

    retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
    if (retval < 0)
        return retval;

    entry->free = false;
    entry->is_macvlan = true;
    entry->vf = vf;
    memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

    return 0;
}
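
/**
 * ixgbe_vf_reset_event - Return a VF's filters and offloads to defaults
 * @adapter: Pointer to adapter struct
 * @vf: VF index
 */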
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
    u8 num_tcs = adapter->hw_tcs;

    /* remove VLAN filters belonging to this VF */
    ixgbe_clear_vf_vlans(adapter, vf);

    /* add back PF assigned VLAN or VLAN 0 */
    ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

    /* reset offloads to defaults */
    ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

    /* set outgoing tags for VFs */
    if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
        ixgbe_clear_vmvir(adapter, vf);
    } else {
        if (vfinfo->pf_qos || !num_tcs)
            ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
                            vfinfo->pf_qos, vf);
        else
            ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
                            adapter->default_up, vf);

        if (vfinfo->spoofchk_enabled)
            hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
    }

    /* reset multicast table array for vf */
    adapter->vfinfo[vf].num_vf_mc_hashes = 0;

    /* Flush and reset the mta with the new values */
    ixgbe_set_rx_mode(adapter->netdev);

    ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
    ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

    /* reset VF api back to unknown */
    adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                            int vf, unsigned char *mac_addr)
{
    s32 retval;

    ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
    retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
    if (retval >= 0)
        memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
               ETH_ALEN);
    else
        memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);

    return retval;
}

int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
    struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
    unsigned int vfn = (event_mask & 0x3f);
    bool enable = ((event_mask & 0x10000000U) != 0);

    if (enable)
        eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

    return 0;
}

static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
                                   u32 qde)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
    u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
    int i;

    for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
        u32 reg;

        /* flush previous write */
        IXGBE_WRITE_FLUSH(hw);

        /* indicate to hardware that we want to set drop enable */
        reg = IXGBE_QDE_WRITE | qde;
        reg |= i << IXGBE_QDE_IDX_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
    }
}
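
/**
 * ixgbe_vf_reset_msg - Handle the VF reset mailbox message
 * @adapter: Pointer to adapter struct
 * @vf: VF index
 *
 * Re-applies the VF's default filters, re-enables its Tx/Rx paths,
 * clears registers that survive an FLR and replies with the VF's MAC
 * address and the multicast filter type.
 */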
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
    struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
    struct ixgbe_hw *hw = &adapter->hw;
    unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
    u32 reg, reg_offset, vf_shift;
    u32 msgbuf[4] = {0, 0, 0, 0};
    u8 *addr = (u8 *)(&msgbuf[1]);
    u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
    int i;

    e_info(probe, "VF Reset msg received from vf %d\n", vf);

    /* reset the filters for the device */
    ixgbe_vf_reset_event(adapter, vf);

    /* set vf mac address */
    if (!is_zero_ether_addr(vf_mac))
        ixgbe_set_vf_mac(adapter, vf, vf_mac);

    vf_shift = vf % 32;
    reg_offset = vf / 32;

    /* enable transmit for vf */
    reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
    reg |= BIT(vf_shift);
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

    /* force drop enable for all VF Rx queues */
    reg = IXGBE_QDE_ENABLE;
    if (adapter->vfinfo[vf].pf_vlan)
        reg |= IXGBE_QDE_HIDE_VLAN;
    ixgbe_write_qde(adapter, vf, reg);

    /* enable receive for vf */
    reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
    reg |= BIT(vf_shift);
    /*
     * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
     * For more info take a look at ixgbe_set_vf_lpe
     */
    if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
        struct net_device *dev = adapter->netdev;
        int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
        if (dev->features & NETIF_F_FCOE_MTU)
            pf_max_frame = max_t(int, pf_max_frame,
                                 IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
        if (pf_max_frame > ETH_FRAME_LEN)
            reg &= ~BIT(vf_shift);
    }
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

    /* enable VF mailbox for further messages */
    adapter->vfinfo[vf].clear_to_send = true;

    /* Enable counting of spoofed packets in the SSVPC register */
    reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
    reg |= BIT(vf_shift);
    IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

    /*
     * Reset the VFs TDWBAL and TDWBAH registers
     * which are not cleared by an FLR
     */
    for (i = 0; i < q_per_pool; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
        IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
    }

    /* reply to reset with ack and vf mac address */
    msgbuf[0] = IXGBE_VF_RESET;
    if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
        msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
        memcpy(addr, vf_mac, ETH_ALEN);
    } else {
        msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
    }

    /*
     * Piggyback the multicast filter type so VF can compute the
     * correct vectors
     */
    msgbuf[3] = hw->mac.mc_filter_type;
    ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

    return 0;
}
static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
    u8 *new_mac = ((u8 *)(&msgbuf[1]));

    if (!is_valid_ether_addr(new_mac)) {
        e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
        return -1;
    }

    if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
        !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
        e_warn(drv,
               "VF %d attempted to override administratively set MAC address\n"
               "Reload the VF driver to resume operations\n",
               vf);
        return -1;
    }

    return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
    u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
    u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
    u8 tcs = adapter->hw_tcs;

    if (adapter->vfinfo[vf].pf_vlan || tcs) {
        e_warn(drv,
               "VF %d attempted to override administratively set VLAN configuration\n"
               "Reload the VF driver to resume operations\n",
               vf);
        return -1;
    }

    /* VLAN 0 is a special case, don't allow it to be removed */
    if (!vid && !add)
        return 0;

    return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
                                    u32 *msgbuf, u32 vf)
{
    u8 *new_mac = ((u8 *)(&msgbuf[1]));
    int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                IXGBE_VT_MSGINFO_SHIFT;
    int err;

    if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
        index > 0) {
        e_warn(drv,
               "VF %d requested MACVLAN filter but is administratively denied\n",
               vf);
        return -1;
    }

    /* A non-zero index indicates the VF is setting a filter */
    if (index) {
        if (!is_valid_ether_addr(new_mac)) {
            e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
            return -1;
        }

        /*
         * If the VF is allowed to set MAC filters then turn off
         * anti-spoofing to avoid false positives.
         */
        if (adapter->vfinfo[vf].spoofchk_enabled) {
            struct ixgbe_hw *hw = &adapter->hw;

            hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
            hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
        }
    }

    err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
    if (err == -ENOSPC)
        e_warn(drv,
               "VF %d has requested a MACVLAN filter but there is no space for it\n",
               vf);

    return err < 0;
}
static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
                                  u32 *msgbuf, u32 vf)
{
    int api = msgbuf[1];

    switch (api) {
    case ixgbe_mbox_api_10:
    case ixgbe_mbox_api_11:
    case ixgbe_mbox_api_12:
    case ixgbe_mbox_api_13:
        adapter->vfinfo[vf].vf_api = api;
        return 0;
    default:
        break;
    }

    e_info(drv, "VF %d requested invalid api version %u\n", vf, api);

    return -1;
}

static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
                               u32 *msgbuf, u32 vf)
{
    struct net_device *dev = adapter->netdev;
    struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
    unsigned int default_tc = 0;
    u8 num_tcs = adapter->hw_tcs;

    /* verify the PF is supporting the correct APIs */
    switch (adapter->vfinfo[vf].vf_api) {
    case ixgbe_mbox_api_20:
    case ixgbe_mbox_api_11:
    case ixgbe_mbox_api_12:
    case ixgbe_mbox_api_13:
        break;
    default:
        return -1;
    }

    /* only allow 1 Tx queue for bandwidth limiting */
    msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
    msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

    /* if TCs > 1 determine which TC belongs to default user priority */
    if (num_tcs > 1)
        default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

    /* notify VF of need for VLAN tag stripping, and correct queue */
    if (num_tcs)
        msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
    else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
        msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
    else
        msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

    /* notify VF of default queue */
    msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

    return 0;
}

static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
    u32 i, j;
    u32 *out_buf = &msgbuf[1];
    const u8 *reta = adapter->rss_indir_tbl;
    u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);

    /* Check if operation is permitted */
    if (!adapter->vfinfo[vf].rss_query_enabled)
        return -EPERM;

    /* verify the PF is supporting the correct API */
    switch (adapter->vfinfo[vf].vf_api) {
    case ixgbe_mbox_api_13:
    case ixgbe_mbox_api_12:
        break;
    default:
        return -EOPNOTSUPP;
    }

    /* This mailbox command is supported (required) only for 82599 and x540
     * VFs which support up to 4 RSS queues. Therefore we will compress the
     * RETA by saving only 2 bits from each entry. This way we will be able
     * to transfer the whole RETA in a single mailbox operation.
     */
    for (i = 0; i < reta_size / 16; i++) {
        out_buf[i] = 0;
        for (j = 0; j < 16; j++)
            out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
    }

    return 0;
}

static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
                                u32 *msgbuf, u32 vf)
{
    u32 *rss_key = &msgbuf[1];

    /* Check if the operation is permitted */
    if (!adapter->vfinfo[vf].rss_query_enabled)
        return -EPERM;

    /* verify the PF is supporting the correct API */
    switch (adapter->vfinfo[vf].vf_api) {
    case ixgbe_mbox_api_13:
    case ixgbe_mbox_api_12:
        break;
    default:
        return -EOPNOTSUPP;
    }

    memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);

    return 0;
}
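
/**
 * ixgbe_update_vf_xcast_mode - Handle a VF request to change receive mode
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message; word 1 carries the requested IXGBEVF_XCAST_MODE_*
 * @vf: VF index
 *
 * Untrusted VFs are capped at multicast mode; promiscuous mode also
 * requires a MAC newer than 82599 and the PF itself in promiscuous mode.
 */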
static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
                                      u32 *msgbuf, u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int xcast_mode = msgbuf[1];
    u32 vmolr, fctrl, disable, enable;

    /* verify the PF is supporting the correct APIs */
    switch (adapter->vfinfo[vf].vf_api) {
    case ixgbe_mbox_api_12:
        /* promisc introduced in 1.3 version */
        if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
            return -EOPNOTSUPP;
        /* fall through */
    case ixgbe_mbox_api_13:
        break;
    default:
        return -EOPNOTSUPP;
    }

    if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
        !adapter->vfinfo[vf].trusted) {
        xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
    }

    if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
        goto out;

    switch (xcast_mode) {
    case IXGBEVF_XCAST_MODE_NONE:
        disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
                  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
        enable = 0;
        break;
    case IXGBEVF_XCAST_MODE_MULTI:
        disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
        enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
        break;
    case IXGBEVF_XCAST_MODE_ALLMULTI:
        disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
        enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
        break;
    case IXGBEVF_XCAST_MODE_PROMISC:
        if (hw->mac.type <= ixgbe_mac_82599EB)
            return -EOPNOTSUPP;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        if (!(fctrl & IXGBE_FCTRL_UPE)) {
            /* VF promisc requires PF in promisc */
            e_warn(drv,
                   "Enabling VF promisc requires PF in promisc\n");
            return -EPERM;
        }

        disable = 0;
        enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
                 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
        break;
    default:
        return -EOPNOTSUPP;
    }

    vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
    vmolr &= ~disable;
    vmolr |= enable;
    IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

    adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
    msgbuf[1] = xcast_mode;

    return 0;
}
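
/**
 * ixgbe_rcv_msg_from_vf - Dispatch a pending VF mailbox message
 * @adapter: Pointer to adapter struct
 * @vf: VF index
 *
 * Reads the message, routes it to the matching handler and replies
 * with an ACK or NACK depending on the handler's result.
 */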
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
    u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
    u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
    struct ixgbe_hw *hw = &adapter->hw;
    s32 retval;

    retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

    if (retval) {
        pr_err("Error receiving message from VF\n");
        return retval;
    }

    /* this is a message we already processed, do nothing */
    if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
        return 0;

    /* flush the ack before we write any messages back */
    IXGBE_WRITE_FLUSH(hw);

    if (msgbuf[0] == IXGBE_VF_RESET)
        return ixgbe_vf_reset_msg(adapter, vf);

    /*
     * until the vf completes a virtual function reset it should not be
     * allowed to start any configuration.
     */
    if (!adapter->vfinfo[vf].clear_to_send) {
        msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
        ixgbe_write_mbx(hw, msgbuf, 1, vf);
        return 0;
    }

    switch ((msgbuf[0] & 0xFFFF)) {
    case IXGBE_VF_SET_MAC_ADDR:
        retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_SET_MULTICAST:
        retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_SET_VLAN:
        retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_SET_LPE:
        retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_SET_MACVLAN:
        retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_API_NEGOTIATE:
        retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_GET_QUEUES:
        retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_GET_RETA:
        retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_GET_RSS_KEY:
        retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
        break;
    case IXGBE_VF_UPDATE_XCAST_MODE:
        retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
        break;
    default:
        e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
        retval = IXGBE_ERR_MBX;
        break;
    }

    /* notify the VF of the results of what it sent us */
    if (retval)
        msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
    else
        msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

    msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

    ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

    return retval;
}

static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 msg = IXGBE_VT_MSGTYPE_NACK;

    /* if device isn't clear to send it shouldn't be reading either */
    if (!adapter->vfinfo[vf].clear_to_send)
        ixgbe_write_mbx(hw, &msg, 1, vf);
}

void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 vf;

    for (vf = 0; vf < adapter->num_vfs; vf++) {
        /* process any reset requests */
        if (!ixgbe_check_for_rst(hw, vf))
            ixgbe_vf_reset_event(adapter, vf);

        /* process any messages pending */
        if (!ixgbe_check_for_msg(hw, vf))
            ixgbe_rcv_msg_from_vf(adapter, vf);

        /* process any acks */
        if (!ixgbe_check_for_ack(hw, vf))
            ixgbe_rcv_ack_from_vf(adapter, vf);
    }
}

void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    /* disable transmit and receive for all vfs */
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);

    IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}

static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ping;

    ping = IXGBE_PF_CONTROL_MSG;
    if (adapter->vfinfo[vf].clear_to_send)
        ping |= IXGBE_VT_MSGTYPE_CTS;
    ixgbe_write_mbx(hw, &ping, 1, vf);
}

void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ping;
    int i;

    for (i = 0; i < adapter->num_vfs; i++) {
        ping = IXGBE_PF_CONTROL_MSG;
        if (adapter->vfinfo[i].clear_to_send)
            ping |= IXGBE_VT_MSGTYPE_CTS;
        ixgbe_write_mbx(hw, &ping, 1, i);
    }
}
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    s32 retval;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    if (is_valid_ether_addr(mac)) {
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
                 mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");

        retval = ixgbe_set_vf_mac(adapter, vf, mac);
        if (retval >= 0) {
            adapter->vfinfo[vf].pf_set_mac = true;

            if (test_bit(__IXGBE_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
            }
        } else {
            dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
        }
    } else if (is_zero_ether_addr(mac)) {
        unsigned char *vf_mac_addr =
            adapter->vfinfo[vf].vf_mac_addresses;

        /* nothing to do */
        if (is_zero_ether_addr(vf_mac_addr))
            return 0;

        dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);

        retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
        if (retval >= 0) {
            adapter->vfinfo[vf].pf_set_mac = false;
            memcpy(vf_mac_addr, mac, ETH_ALEN);
        } else {
            dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
        }
    } else {
        retval = -EINVAL;
    }

    return retval;
}

static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
                                  u16 vlan, u8 qos)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int err;

    err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
    if (err)
        goto out;

    /* Revoke tagless access via VLAN 0 */
    ixgbe_set_vf_vlan(adapter, false, 0, vf);

    ixgbe_set_vmvir(adapter, vlan, qos, vf);
    ixgbe_set_vmolr(hw, vf, false);

    /* enable hide vlan on X550 */
    if (hw->mac.type >= ixgbe_mac_X550)
        ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
                        IXGBE_QDE_HIDE_VLAN);

    adapter->vfinfo[vf].pf_vlan = vlan;
    adapter->vfinfo[vf].pf_qos = qos;
    dev_info(&adapter->pdev->dev,
             "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
    if (test_bit(__IXGBE_DOWN, &adapter->state)) {
        dev_warn(&adapter->pdev->dev,
                 "The VF VLAN has been set, but the PF device is not up.\n");
        dev_warn(&adapter->pdev->dev,
                 "Bring the PF device up before attempting to use the VF device.\n");
    }

out:
    return err;
}

static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int err;

    err = ixgbe_set_vf_vlan(adapter, false,
                            adapter->vfinfo[vf].pf_vlan, vf);
    /* Restore tagless access via VLAN 0 */
    ixgbe_set_vf_vlan(adapter, true, 0, vf);

    ixgbe_clear_vmvir(adapter, vf);
    ixgbe_set_vmolr(hw, vf, true);

    /* disable hide VLAN on X550 */
    if (hw->mac.type >= ixgbe_mac_X550)
        ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

    adapter->vfinfo[vf].pf_vlan = 0;
    adapter->vfinfo[vf].pf_qos = 0;

    return err;
}

int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
                          u8 qos, __be16 vlan_proto)
{
    int err = 0;
    struct ixgbe_adapter *adapter = netdev_priv(netdev);

    if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
        return -EINVAL;

    if (vlan_proto != htons(ETH_P_8021Q))
        return -EPROTONOSUPPORT;

    if (vlan || qos) {
        /* Check if there is already a port VLAN set, if so
         * we have to delete the old one first before we
         * can set the new one. The usage model had
         * previously assumed the user would delete the
         * old port VLAN before setting a new one but this
         * is not necessarily the case.
         */
        if (adapter->vfinfo[vf].pf_vlan)
            err = ixgbe_disable_port_vlan(adapter, vf);
        if (err)
            goto out;
        err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
    } else {
        err = ixgbe_disable_port_vlan(adapter, vf);
    }

out:
    return err;
}

int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_100_FULL:
        return 100;
    case IXGBE_LINK_SPEED_1GB_FULL:
        return 1000;
    case IXGBE_LINK_SPEED_10GB_FULL:
        return 10000;
    default:
        return 0;
    }
}
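
/**
 * ixgbe_set_vf_rate_limit - Program the Tx rate limiter for a VF
 * @adapter: Pointer to adapter struct
 * @vf: VF index
 *
 * Converts the stored tx_rate into an RTTBCNRC rate factor and writes
 * it to every Tx queue in the VF's pool; a rate of zero disables the
 * rate scheduler.
 */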
static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
{
    struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
    struct ixgbe_hw *hw = &adapter->hw;
    u32 bcnrc_val = 0;
    u16 queue, queues_per_pool;
    u16 tx_rate = adapter->vfinfo[vf].tx_rate;

    if (tx_rate) {
        /* start with base link speed value */
        bcnrc_val = adapter->vf_rate_link_speed;

        /* Calculate the rate factor values to set */
        bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
        bcnrc_val /= tx_rate;

        /* clear everything but the rate factor */
        bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
                     IXGBE_RTTBCNRC_RF_DEC_MASK;

        /* enable the rate scheduler */
        bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
    }

    /*
     * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
     * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
     * and 0x004 otherwise.
     */
    switch (hw->mac.type) {
    case ixgbe_mac_82599EB:
        IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
        break;
    case ixgbe_mac_X540:
        IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
        break;
    default:
        break;
    }

    /* determine how many queues per pool based on VMDq mask */
    queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);

    /* write value for all Tx queues belonging to VF */
    for (queue = 0; queue < queues_per_pool; queue++) {
        unsigned int reg_idx = (vf * queues_per_pool) + queue;

        IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
        IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
    }
}

void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
    int i;

    /* VF Tx rate limit was not set */
    if (!adapter->vf_rate_link_speed)
        return;

    if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
        adapter->vf_rate_link_speed = 0;
        dev_info(&adapter->pdev->dev,
                 "Link speed has been changed. VF Transmit rate is disabled\n");
    }

    for (i = 0; i < adapter->num_vfs; i++) {
        if (!adapter->vf_rate_link_speed)
            adapter->vfinfo[i].tx_rate = 0;

        ixgbe_set_vf_rate_limit(adapter, i);
    }
}

int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
                        int max_tx_rate)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    int link_speed;

    /* verify VF is active */
    if (vf >= adapter->num_vfs)
        return -EINVAL;

    /* verify link is up */
    if (!adapter->link_up)
        return -EINVAL;

    /* verify we are linked at 10Gbps */
    link_speed = ixgbe_link_mbps(adapter);
    if (link_speed != 10000)
        return -EINVAL;

    if (min_tx_rate)
        return -EINVAL;

    /* rate limit cannot be less than 10 Mbps or greater than link speed */
    if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
        return -EINVAL;

    /* store values */
    adapter->vf_rate_link_speed = link_speed;
    adapter->vfinfo[vf].tx_rate = max_tx_rate;

    /* update hardware configuration */
    ixgbe_set_vf_rate_limit(adapter, vf);

    return 0;
}

int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    adapter->vfinfo[vf].spoofchk_enabled = setting;

    /* configure MAC spoofing */
    hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);

    /* configure VLAN spoofing */
    hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);

    /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be
     * calling set_ethertype_anti_spoofing for each VF in loop below
     */
    if (hw->mac.ops.set_ethertype_anti_spoofing) {
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
                        (IXGBE_ETQF_FILTER_EN |
                         IXGBE_ETQF_TX_ANTISPOOF |
                         IXGBE_ETH_P_LLDP));
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
                        (IXGBE_ETQF_FILTER_EN |
                         IXGBE_ETQF_TX_ANTISPOOF |
                         ETH_P_PAUSE));

        hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
    }

    return 0;
}

int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
                                  bool setting)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);

    /* This operation is currently supported only for 82599 and x540
     * devices.
     */
    if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
        adapter->hw.mac.type >= ixgbe_mac_X550)
        return -EOPNOTSUPP;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    adapter->vfinfo[vf].rss_query_enabled = setting;

    return 0;
}

int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    /* nothing to do */
    if (adapter->vfinfo[vf].trusted == setting)
        return 0;

    adapter->vfinfo[vf].trusted = setting;

    /* reset VF to reconfigure features */
    adapter->vfinfo[vf].clear_to_send = false;
    ixgbe_ping_vf(adapter, vf);

    e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");

    return 0;
}

int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    ivi->vf = vf;
    memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
    ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
    ivi->min_tx_rate = 0;
    ivi->vlan = adapter->vfinfo[vf].pf_vlan;
    ivi->qos = adapter->vfinfo[vf].pf_qos;
    ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
    ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
    ivi->trusted = adapter->vfinfo[vf].trusted;

    return 0;
}