ixgbe_lib.c

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
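	/*
	 * A worked example of the pool math, assuming the usual kernel
	 * definition __ALIGN_MASK(x, mask) = (((x) + (mask)) & ~(mask)) and
	 * the IXGBE_82599_VMDQ_*Q_MASK values selected later in
	 * ixgbe_set_dcb_sriov_queues(): with the 4-queue mask,
	 * __ALIGN_MASK(1, ~vmdq->mask) evaluates to 4, the queue count per
	 * pool, so reg_idx starts at the first hardware queue of the pool at
	 * vmdq->offset.  Likewise, __ALIGN_MASK(reg_idx, ~vmdq->mask) in the
	 * loops below rounds reg_idx up to the next pool boundary.
	 */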
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
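
/*
 * Illustrative layout for the function above, assuming 32-pool mode
 * (4 queues per pool) and vmdq->offset = 2: the first Rx/Tx ring lands on
 * hardware queue 2 * 4 = 8 and later rings advance one queue at a time; the
 * __ALIGN_MASK() skip only triggers when a pool exposes more queues than
 * there are TCs, e.g. 5 TCs in 16-pool/8-queue mode, where the sixth ring
 * of a pool jumps ahead to the first queue of the next pool.
 */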
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
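
/*
 * Concrete example of the resulting mapping: on 82598 with 4 TCs and
 * rss_i = 4, ixgbe_get_first_reg_idx() yields Tx strides of 4 and Rx
 * strides of 8, so TC0 owns Tx registers 0-3 and Rx registers 0-3,
 * TC1 owns Tx 4-7 and Rx 8-11, and so on.
 */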

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
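
/*
 * Each RSS mask above is simply the queue count minus one (64, 16, 8, 4 and
 * 2 queues respectively), so a ring or hash index can be folded into the
 * enabled RSS range with a single bitwise AND; IXGBE_RSS_DISABLED_MASK
 * collapses everything onto queue 0.
 */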

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;

	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
#endif
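
	/*
	 * 128 is the number of hardware Rx/Tx queue pairs on 82599-class
	 * parts, so 128 / __ALIGN_MASK(1, ~vmdq_m) is the total pool count
	 * in the chosen mode (e.g. 32 pools with the 4-queue mask), and
	 * fcoe_i is the number of pools left over after VMDq; each of those
	 * can donate its queue at the FCoE TC offset, as mapped in
	 * ixgbe_cache_ring_dcb_sriov() above.
	 */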
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);

	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;

		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}
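
	/*
	 * For example: a request for 40 pools takes the first branch above
	 * (64-pool mode, 2 queues each, rss_i clamped to at most 2), while
	 * 20 pools with no extra forwarding (macvlan) pools takes the
	 * second (32-pool mode, 4 queues each, rss_i rounded down to 4, 2
	 * or 1).
	 */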

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the largest set of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
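
/*
 * A sketch of the vector budget above, assuming NON_Q_VECTORS is 1 (the
 * "other" cause vector) and MIN_MSIX_COUNT is 2: with 16 Rx, 16 Tx and 8 XDP
 * queues on a 12-CPU system, the request is max(16, 16, 8) = 16, clamped to
 * 12 CPUs, plus 1, i.e. 13 vectors; pci_enable_msix_range() may then grant
 * anywhere from 2 to 13 of them.
 */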

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
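
	/*
	 * Note that the q_vector and all of its rings come from one
	 * contiguous allocation: q_vector->ring is a trailing array of
	 * ring_count ixgbe_ring structures, and the ring++ advances in the
	 * loops below walk it in order (Tx rings first, then XDP, then Rx).
	 */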

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];

			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
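
/*
 * A worked example of the distribution above: with 8 q_vectors, 16 Rx and
 * 16 Tx queues, every pass computes rqpv = tqpv = 2, giving each vector two
 * Tx and two Rx rings; when the counts do not divide evenly, the
 * DIV_ROUND_UP() terms push the remainder onto the earlier vectors.
 */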

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}