ixgbe_lib.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
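	/* Note: __ALIGN_MASK(1, ~vmdq->mask) works out to the number of
	 * hardware queues per VMDq pool, since the VMDq mask keeps the pool
	 * bits of the queue index and leaves the per-pool bits clear.  For
	 * example, with a 4-queues-per-pool mask the low two bits are clear,
	 * so __ALIGN_MASK(1, 0x3) = (1 + 3) & ~3 = 4, and the multiplication
	 * below lands reg_idx on the first queue of the PF's first pool.
	 */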
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}

		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32  16
			 * RxQs/TC:  32  32  32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;
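
	/* The loop below maps ring (tc * rss_i + i) onto hardware queue
	 * (first register index for tc) + i.  For example, assuming an
	 * 82599-class device in 4 TC mode with rss_i = 16, TC1 owns rings
	 * 16-31 and they land on Tx registers 64-79 and Rx registers 32-47
	 * (see ixgbe_get_first_reg_idx() above).
	 */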
	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif

/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}

		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}
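
/* With an XDP program attached the driver asks for one XDP Tx ring per
 * possible CPU (nr_cpu_ids) so that XDP transmits can run per CPU without
 * contending for a shared ring.
 */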
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}
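
/* RSS queue masks.  These cover the low bits of a hardware queue index that
 * RSS may select (e.g. IXGBE_RSS_8Q_MASK keeps indices 0-7 of a pool) and are
 * stashed in ring_feature[RING_F_RSS].mask by the ixgbe_set_*_queues()
 * helpers below for use by the ring-to-register mapping code above.
 */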
#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;

	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
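	/* i.e. the 128 hardware queues form 128 / queues-per-pool pools
	 * (16 pools of 8 or 32 pools of 4 with the masks chosen above), and
	 * every pool beyond the vmdq_i pools reserved for VMDq is left over
	 * for dedicated FCoE rings.
	 */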
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}
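
	/* These per-TC caps mirror the Tx queue layout in
	 * ixgbe_get_first_reg_idx(): 82598 has 4 Tx queues per TC, while the
	 * 82599-class parts give at least 8 Tx queues per TC in 8 TC mode and
	 * at least 16 per TC in 4 TC mode, so rss_i * tcs always fits the
	 * hardware register layout.
	 */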

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif

/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);

	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;

		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}
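
	/* Either way the whole 128-queue space is carved into pools:
	 * 64 pools x 2 queues or 32 pools x 4 queues.  The per-pool RSS
	 * width (rss_i) is what each pool, including the PF's own pool,
	 * gets to spread its receive traffic across.
	 */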

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}
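
	/* When ATR (hash-based Flow Director) is usable, the ring count is
	 * raised to the Flow Director feature limit so sampled Tx flows can
	 * be steered back to any of those rings; in perfect-filter mode the
	 * hash flag is left clear since the two Flow Director modes are not
	 * used together.
	 */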

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;

		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts.  Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);
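	/* pci_enable_msix_range() returns the number of vectors actually
	 * enabled, anywhere in [vector_threshold, vectors], or a negative
	 * errno if even the minimum could not be granted; it never leaves a
	 * partial allocation below the minimum in place.
	 */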
	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
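
	/* All of this vector's rings live in the same allocation, laid out
	 * directly behind the q_vector itself; that is why the loops below
	 * can walk them by simply incrementing the ring pointer obtained
	 * from q_vector->ring.
	 */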

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
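
		/* Recomputing the per-vector share each iteration spreads the
		 * remaining rings as evenly as possible, with earlier vectors
		 * taking the larger shares.  For example, 10 Rx rings over 4
		 * vectors come out as 3, 3, 2, 2.
		 */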
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}