ixgbe_lib.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 1999 - 2018 Intel Corporation. */
  3. #include "ixgbe.h"
  4. #include "ixgbe_sriov.h"
  5. #ifdef CONFIG_IXGBE_DCB
  6. /**
  7. * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
  8. * @adapter: board private structure to initialize
  9. *
  10. * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
  11. * will also try to cache the proper offsets if RSS/FCoE are enabled along
  12. * with VMDq.
  13. *
  14. **/
  15. static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
  16. {
  17. #ifdef IXGBE_FCOE
  18. struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
  19. #endif /* IXGBE_FCOE */
  20. struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
  21. int i;
  22. u16 reg_idx, pool;
  23. u8 tcs = adapter->hw_tcs;
  24. /* verify we have DCB queueing enabled before proceeding */
  25. if (tcs <= 1)
  26. return false;
  27. /* verify we have VMDq enabled before proceeding */
  28. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  29. return false;
  30. /* start at VMDq register offset for SR-IOV enabled setups */
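/* Note: __ALIGN_MASK(1, ~vmdq->mask) works out to the number of hardware
 * queues per VMDq pool (2, 4, or 8 depending on the VMDq mask), so this
 * starts reg_idx at the first queue of the first pool assigned to the PF.
 */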
  31. reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  32. for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
  33. /* If we have run past this pool's queue indices, move to the next pool */
  34. if ((reg_idx & ~vmdq->mask) >= tcs) {
  35. pool++;
  36. reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  37. }
  38. adapter->rx_ring[i]->reg_idx = reg_idx;
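/* Only pool 0 rings are tied to the PF netdev here; rings in other pools
 * are left without a netdev and are picked up later, e.g. when a pool is
 * claimed by the L2-forwarding (macvlan offload) path.
 */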
  39. adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
  40. }
  41. reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  42. for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
  44. /* If we have run past this pool's queue indices, move to the next pool */
  44. if ((reg_idx & ~vmdq->mask) >= tcs)
  45. reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  46. adapter->tx_ring[i]->reg_idx = reg_idx;
  47. }
  48. #ifdef IXGBE_FCOE
  49. /* nothing to do if FCoE is disabled */
  50. if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
  51. return true;
  52. /* The work is already done if the FCoE ring is shared */
  53. if (fcoe->offset < tcs)
  54. return true;
  55. /* The FCoE rings exist separately, we need to move their reg_idx */
  56. if (fcoe->indices) {
  57. u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
  58. u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
  59. reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
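/* reg_idx now points at the first queue past the last VMDq pool; each FCoE
 * ring below is placed on the queue reserved for the FCoE traffic class in
 * successive unused pools.
 */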
  60. for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
  61. reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
  62. adapter->rx_ring[i]->reg_idx = reg_idx;
  63. adapter->rx_ring[i]->netdev = adapter->netdev;
  64. reg_idx++;
  65. }
  66. reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
  67. for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
  68. reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
  69. adapter->tx_ring[i]->reg_idx = reg_idx;
  70. reg_idx++;
  71. }
  72. }
  73. #endif /* IXGBE_FCOE */
  74. return true;
  75. }
  76. /* ixgbe_get_first_reg_idx - Return first register index associated with ring */
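/* Example: on 82599-class hardware with 8 TCs, TC 2 maps to Tx base
 * 2 << 5 = 64 and Rx base 2 << 4 = 32; with 4 TCs it maps to Tx base
 * (2 + 4) << 4 = 96 and Rx base 2 << 5 = 64.
 */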
  77. static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  78. unsigned int *tx, unsigned int *rx)
  79. {
  80. struct ixgbe_hw *hw = &adapter->hw;
  81. u8 num_tcs = adapter->hw_tcs;
  82. *tx = 0;
  83. *rx = 0;
  84. switch (hw->mac.type) {
  85. case ixgbe_mac_82598EB:
  86. /* TxQs/TC: 4 RxQs/TC: 8 */
  87. *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
  88. *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
  89. break;
  90. case ixgbe_mac_82599EB:
  91. case ixgbe_mac_X540:
  92. case ixgbe_mac_X550:
  93. case ixgbe_mac_X550EM_x:
  94. case ixgbe_mac_x550em_a:
  95. if (num_tcs > 4) {
  96. /*
  97. * TCs : TC0/1 TC2/3 TC4-7
  98. * TxQs/TC: 32 16 8
  99. * RxQs/TC: 16 16 16
  100. */
  101. *rx = tc << 4;
  102. if (tc < 3)
  103. *tx = tc << 5; /* 0, 32, 64 */
  104. else if (tc < 5)
  105. *tx = (tc + 2) << 4; /* 80, 96 */
  106. else
  107. *tx = (tc + 8) << 3; /* 104, 112, 120 */
  108. } else {
  109. /*
  110. * TCs : TC0 TC1 TC2/3
  111. * TxQs/TC: 64 32 16
  112. * RxQs/TC: 32 32 32
  113. */
  114. *rx = tc << 5;
  115. if (tc < 2)
  116. *tx = tc << 6; /* 0, 64 */
  117. else
  118. *tx = (tc + 4) << 4; /* 96, 112 */
  119. }
  120. default:
  121. break;
  122. }
  123. }
  124. /**
  125. * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
  126. * @adapter: board private structure to initialize
  127. *
  128. * Cache the descriptor ring offsets for DCB to the assigned rings.
  129. *
  130. **/
  131. static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
  132. {
  133. u8 num_tcs = adapter->hw_tcs;
  134. unsigned int tx_idx, rx_idx;
  135. int tc, offset, rss_i, i;
  136. /* verify we have DCB queueing enabled before proceeding */
  137. if (num_tcs <= 1)
  138. return false;
  139. rss_i = adapter->ring_feature[RING_F_RSS].indices;
  140. for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
  141. ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
  142. for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
  143. adapter->tx_ring[offset + i]->reg_idx = tx_idx;
  144. adapter->rx_ring[offset + i]->reg_idx = rx_idx;
  145. adapter->rx_ring[offset + i]->netdev = adapter->netdev;
  146. adapter->tx_ring[offset + i]->dcb_tc = tc;
  147. adapter->rx_ring[offset + i]->dcb_tc = tc;
  148. }
  149. }
  150. return true;
  151. }
  152. #endif
  153. /**
  154. * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
  155. * @adapter: board private structure to initialize
  156. *
  157. * SR-IOV doesn't use any descriptor rings but changes the default if
  158. * no other mapping is used.
  159. *
  160. */
  161. static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
  162. {
  163. #ifdef IXGBE_FCOE
  164. struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
  165. #endif /* IXGBE_FCOE */
  166. struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
  167. struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
  168. u16 reg_idx, pool;
  169. int i;
  170. /* only proceed if VMDq is enabled */
  171. if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
  172. return false;
  173. /* start at VMDq register offset for SR-IOV enabled setups */
  174. pool = 0;
  175. reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  176. for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
  177. #ifdef IXGBE_FCOE
  178. /* Allow first FCoE queue to be mapped as RSS */
  179. if (fcoe->offset && (i > fcoe->offset))
  180. break;
  181. #endif
  183. /* If we have run past this pool's queue indices, move to the next pool */
  183. if ((reg_idx & ~vmdq->mask) >= rss->indices) {
  184. pool++;
  185. reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  186. }
  187. adapter->rx_ring[i]->reg_idx = reg_idx;
  188. adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
  189. }
  190. #ifdef IXGBE_FCOE
  191. /* FCoE uses a linear block of queues so just assigning 1:1 */
  192. for (; i < adapter->num_rx_queues; i++, reg_idx++) {
  193. adapter->rx_ring[i]->reg_idx = reg_idx;
  194. adapter->rx_ring[i]->netdev = adapter->netdev;
  195. }
  196. #endif
  197. reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  198. for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
  199. #ifdef IXGBE_FCOE
  200. /* Allow first FCoE queue to be mapped as RSS */
  201. if (fcoe->offset && (i > fcoe->offset))
  202. break;
  203. #endif
  205. /* If we have run past this pool's queue indices, move to the next pool */
  205. if ((reg_idx & rss->mask) >= rss->indices)
  206. reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  207. adapter->tx_ring[i]->reg_idx = reg_idx;
  208. }
  209. #ifdef IXGBE_FCOE
  210. /* FCoE uses a linear block of queues so just assigning 1:1 */
  211. for (; i < adapter->num_tx_queues; i++, reg_idx++)
  212. adapter->tx_ring[i]->reg_idx = reg_idx;
  213. #endif
  214. return true;
  215. }
  216. /**
  217. * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
  218. * @adapter: board private structure to initialize
  219. *
  220. * Cache the descriptor ring offsets for RSS to the assigned rings.
  221. *
  222. **/
  223. static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
  224. {
  225. int i, reg_idx;
  226. for (i = 0; i < adapter->num_rx_queues; i++) {
  227. adapter->rx_ring[i]->reg_idx = i;
  228. adapter->rx_ring[i]->netdev = adapter->netdev;
  229. }
  230. for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
  231. adapter->tx_ring[i]->reg_idx = reg_idx;
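/* XDP Tx rings continue the register numbering immediately after the
 * regular Tx rings, so both share the same hardware Tx queue space.
 */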
  232. for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
  233. adapter->xdp_ring[i]->reg_idx = reg_idx;
  234. return true;
  235. }
  236. /**
  237. * ixgbe_cache_ring_register - Descriptor ring to register mapping
  238. * @adapter: board private structure to initialize
  239. *
  240. * Once we know the feature-set enabled for the device, we'll cache
  241. * the register offset the descriptor ring is assigned to.
  242. *
  243. * Note, the order of the various feature calls is important. It must start
  244. * with the most features enabled at the same time, then trickle down to the
  245. * fewest features turned on at once.
  246. **/
  247. static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  248. {
  249. /* start with default case */
  250. adapter->rx_ring[0]->reg_idx = 0;
  251. adapter->tx_ring[0]->reg_idx = 0;
  252. #ifdef CONFIG_IXGBE_DCB
  253. if (ixgbe_cache_ring_dcb_sriov(adapter))
  254. return;
  255. if (ixgbe_cache_ring_dcb(adapter))
  256. return;
  257. #endif
  258. if (ixgbe_cache_ring_sriov(adapter))
  259. return;
  260. ixgbe_cache_ring_rss(adapter);
  261. }
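/* With an XDP program attached, one XDP Tx ring is requested per possible
 * CPU so each CPU can transmit XDP frames on its own ring.
 */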
  262. static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
  263. {
  264. return adapter->xdp_prog ? nr_cpu_ids : 0;
  265. }
  266. #define IXGBE_RSS_64Q_MASK 0x3F
  267. #define IXGBE_RSS_16Q_MASK 0xF
  268. #define IXGBE_RSS_8Q_MASK 0x7
  269. #define IXGBE_RSS_4Q_MASK 0x3
  270. #define IXGBE_RSS_2Q_MASK 0x1
  271. #define IXGBE_RSS_DISABLED_MASK 0x0
  272. #ifdef CONFIG_IXGBE_DCB
  273. /**
  274. * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
  275. * @adapter: board private structure to initialize
  276. *
  277. * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
  278. * and VM pools where appropriate. Also assign queues based on DCB
  279. * priorities and map accordingly.
  280. *
  281. **/
  282. static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
  283. {
  284. int i;
  285. u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
  286. u16 vmdq_m = 0;
  287. #ifdef IXGBE_FCOE
  288. u16 fcoe_i = 0;
  289. #endif
  290. u8 tcs = adapter->hw_tcs;
  291. /* verify we have DCB queueing enabled before proceeding */
  292. if (tcs <= 1)
  293. return false;
  294. /* verify we have VMDq enabled before proceeding */
  295. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  296. return false;
  297. /* limit VMDq instances on the PF by number of Tx queues */
  298. vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
  299. /* Add starting offset to total pool count */
  300. vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
  301. /* 16 pools w/ 8 TC per pool */
  302. if (tcs > 4) {
  303. vmdq_i = min_t(u16, vmdq_i, 16);
  304. vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
  305. /* 32 pools w/ 4 TC per pool */
  306. } else {
  307. vmdq_i = min_t(u16, vmdq_i, 32);
  308. vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
  309. }
  310. #ifdef IXGBE_FCOE
  311. /* queues in the remaining pools are available for FCoE */
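/* 82599/X540-class parts expose 128 queues; dividing by the queues-per-pool
 * value gives the total pool count, and any pools not consumed by VMDq are
 * left over for FCoE.
 */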
  312. fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
  313. #endif
  314. /* remove the starting offset from the pool count */
  315. vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
  316. /* save features for later use */
  317. adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
  318. adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
  319. /*
  320. * We do not support DCB, VMDq, and RSS all simultaneously
  321. * so we will disable RSS since it is the lowest priority
  322. */
  323. adapter->ring_feature[RING_F_RSS].indices = 1;
  324. adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
  325. /* disable ATR as it is not supported when VMDq is enabled */
  326. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  327. adapter->num_rx_pools = vmdq_i;
  328. adapter->num_rx_queues_per_pool = tcs;
  329. adapter->num_tx_queues = vmdq_i * tcs;
  330. adapter->num_xdp_queues = 0;
  331. adapter->num_rx_queues = vmdq_i * tcs;
  332. #ifdef IXGBE_FCOE
  333. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  334. struct ixgbe_ring_feature *fcoe;
  335. fcoe = &adapter->ring_feature[RING_F_FCOE];
  336. /* limit ourselves based on feature limits */
  337. fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
  338. if (fcoe_i) {
  339. /* alloc queues for FCoE separately */
  340. fcoe->indices = fcoe_i;
  341. fcoe->offset = vmdq_i * tcs;
  342. /* add queues to adapter */
  343. adapter->num_tx_queues += fcoe_i;
  344. adapter->num_rx_queues += fcoe_i;
  345. } else if (tcs > 1) {
  346. /* use queue belonging to FCoE TC */
  347. fcoe->indices = 1;
  348. fcoe->offset = ixgbe_fcoe_get_tc(adapter);
  349. } else {
  350. adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  351. fcoe->indices = 0;
  352. fcoe->offset = 0;
  353. }
  354. }
  355. #endif /* IXGBE_FCOE */
  356. /* configure TC to queue mapping */
  357. for (i = 0; i < tcs; i++)
  358. netdev_set_tc_queue(adapter->netdev, i, 1, i);
  359. return true;
  360. }
  361. static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
  362. {
  363. struct net_device *dev = adapter->netdev;
  364. struct ixgbe_ring_feature *f;
  365. int rss_i, rss_m, i;
  366. int tcs;
  367. /* Map queue offset and counts onto allocated tx queues */
  368. tcs = adapter->hw_tcs;
  369. /* verify we have DCB queueing enabled before proceeding */
  370. if (tcs <= 1)
  371. return false;
  372. /* determine the upper limit for our current DCB mode */
  373. rss_i = dev->num_tx_queues / tcs;
  374. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  375. /* 8 TC w/ 4 queues per TC */
  376. rss_i = min_t(u16, rss_i, 4);
  377. rss_m = IXGBE_RSS_4Q_MASK;
  378. } else if (tcs > 4) {
  379. /* 8 TC w/ 8 queues per TC */
  380. rss_i = min_t(u16, rss_i, 8);
  381. rss_m = IXGBE_RSS_8Q_MASK;
  382. } else {
  383. /* 4 TC w/ 16 queues per TC */
  384. rss_i = min_t(u16, rss_i, 16);
  385. rss_m = IXGBE_RSS_16Q_MASK;
  386. }
  387. /* set RSS mask and indices */
  388. f = &adapter->ring_feature[RING_F_RSS];
  389. rss_i = min_t(int, rss_i, f->limit);
  390. f->indices = rss_i;
  391. f->mask = rss_m;
  392. /* disable ATR as it is not supported when multiple TCs are enabled */
  393. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  394. #ifdef IXGBE_FCOE
  395. /* FCoE enabled queues require special configuration indexed
  396. * by feature specific indices and offset. Here we map FCoE
  397. * indices onto the DCB queue pairs allowing FCoE to own
  398. * configuration later.
  399. */
  400. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  401. u8 tc = ixgbe_fcoe_get_tc(adapter);
  402. f = &adapter->ring_feature[RING_F_FCOE];
  403. f->indices = min_t(u16, rss_i, f->limit);
  404. f->offset = rss_i * tc;
  405. }
  406. #endif /* IXGBE_FCOE */
  407. for (i = 0; i < tcs; i++)
  408. netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
  409. adapter->num_tx_queues = rss_i * tcs;
  410. adapter->num_xdp_queues = 0;
  411. adapter->num_rx_queues = rss_i * tcs;
  412. return true;
  413. }
  414. #endif
  415. /**
  416. * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
  417. * @adapter: board private structure to initialize
  418. *
  419. * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
  420. * and VM pools where appropriate. If RSS is available, then also try and
  421. * enable RSS and map accordingly.
  422. *
  423. **/
  424. static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  425. {
  426. u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
  427. u16 vmdq_m = 0;
  428. u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
  429. u16 rss_m = IXGBE_RSS_DISABLED_MASK;
  430. #ifdef IXGBE_FCOE
  431. u16 fcoe_i = 0;
  432. #endif
  433. /* only proceed if SR-IOV is enabled */
  434. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  435. return false;
  436. /* limit l2fwd RSS based on total Tx queue limit */
  437. rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
  438. /* Add starting offset to total pool count */
  439. vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
  440. /* double check we are limited to maximum pools */
  441. vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
  442. /* 64 pool mode with 2 queues per pool */
  443. if (vmdq_i > 32) {
  444. vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
  445. rss_m = IXGBE_RSS_2Q_MASK;
  446. rss_i = min_t(u16, rss_i, 2);
  447. /* 32 pool mode with up to 4 queues per pool */
  448. } else {
  449. vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
  450. rss_m = IXGBE_RSS_4Q_MASK;
  451. /* We can support 4, 2, or 1 queues */
  452. rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
  453. }
  454. #ifdef IXGBE_FCOE
  455. /* queues in the remaining pools are available for FCoE */
  456. fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
  457. #endif
  458. /* remove the starting offset from the pool count */
  459. vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
  460. /* save features for later use */
  461. adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
  462. adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
  463. /* limit RSS based on user input and save for later use */
  464. adapter->ring_feature[RING_F_RSS].indices = rss_i;
  465. adapter->ring_feature[RING_F_RSS].mask = rss_m;
  466. adapter->num_rx_pools = vmdq_i;
  467. adapter->num_rx_queues_per_pool = rss_i;
  468. adapter->num_rx_queues = vmdq_i * rss_i;
  469. adapter->num_tx_queues = vmdq_i * rss_i;
  470. adapter->num_xdp_queues = 0;
  471. /* disable ATR as it is not supported when VMDq is enabled */
  472. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  473. #ifdef IXGBE_FCOE
  474. /*
  475. * FCoE can use rings from adjacent buffers to allow RSS
  476. * like behavior. To account for this we need to add the
  477. * FCoE indices to the total ring count.
  478. */
  479. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  480. struct ixgbe_ring_feature *fcoe;
  481. fcoe = &adapter->ring_feature[RING_F_FCOE];
  482. /* limit ourselves based on feature limits */
  483. fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
  484. if (vmdq_i > 1 && fcoe_i) {
  485. /* alloc queues for FCoE separately */
  486. fcoe->indices = fcoe_i;
  487. fcoe->offset = vmdq_i * rss_i;
  488. } else {
  489. /* merge FCoE queues with RSS queues */
  490. fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
  491. /* limit indices to rss_i if MSI-X is disabled */
  492. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  493. fcoe_i = rss_i;
  494. /* attempt to reserve some queues for just FCoE */
  495. fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
  496. fcoe->offset = fcoe_i - fcoe->indices;
  497. fcoe_i -= rss_i;
  498. }
  499. /* add queues to adapter */
  500. adapter->num_tx_queues += fcoe_i;
  501. adapter->num_rx_queues += fcoe_i;
  502. }
  503. #endif
  504. /* populate TC0 for use by pool 0 */
  505. netdev_set_tc_queue(adapter->netdev, 0,
  506. adapter->num_rx_queues_per_pool, 0);
  507. return true;
  508. }
  509. /**
  510. * ixgbe_set_rss_queues - Allocate queues for RSS
  511. * @adapter: board private structure to initialize
  512. *
  513. * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
  514. * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
  515. *
  516. **/
  517. static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  518. {
  519. struct ixgbe_hw *hw = &adapter->hw;
  520. struct ixgbe_ring_feature *f;
  521. u16 rss_i;
  522. /* set mask for 16 queue limit of RSS */
  523. f = &adapter->ring_feature[RING_F_RSS];
  524. rss_i = f->limit;
  525. f->indices = rss_i;
  526. if (hw->mac.type < ixgbe_mac_X550)
  527. f->mask = IXGBE_RSS_16Q_MASK;
  528. else
  529. f->mask = IXGBE_RSS_64Q_MASK;
  530. /* disable ATR by default, it will be configured below */
  531. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  532. /*
  533. * Use Flow Director in addition to RSS to ensure the best
  534. * distribution of flows across cores, even when an FDIR flow
  535. * isn't matched.
  536. */
  537. if (rss_i > 1 && adapter->atr_sample_rate) {
  538. f = &adapter->ring_feature[RING_F_FDIR];
  539. rss_i = f->indices = f->limit;
  540. if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
  541. adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
  542. }
  543. #ifdef IXGBE_FCOE
  544. /*
  545. * FCoE can exist on the same rings as standard network traffic
  546. * however it is preferred to avoid that if possible. In order
  547. * to get the best performance we allocate as many FCoE queues
  548. * as we can and we place them at the end of the ring array to
  549. * avoid sharing queues with standard RSS on systems with 24 or
  550. * more CPUs.
  551. */
  552. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  553. struct net_device *dev = adapter->netdev;
  554. u16 fcoe_i;
  555. f = &adapter->ring_feature[RING_F_FCOE];
  556. /* merge FCoE queues with RSS queues */
  557. fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
  558. fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
  559. /* limit indices to rss_i if MSI-X is disabled */
  560. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  561. fcoe_i = rss_i;
  562. /* attempt to reserve some queues for just FCoE */
  563. f->indices = min_t(u16, fcoe_i, f->limit);
  564. f->offset = fcoe_i - f->indices;
  565. rss_i = max_t(u16, fcoe_i, rss_i);
  566. }
  567. #endif /* IXGBE_FCOE */
  568. adapter->num_rx_queues = rss_i;
  569. adapter->num_tx_queues = rss_i;
  570. adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
  571. return true;
  572. }
  573. /**
  574. * ixgbe_set_num_queues - Allocate queues for device, feature dependent
  575. * @adapter: board private structure to initialize
  576. *
  577. * This is the top level queue allocation routine. The order here is very
  578. * important, starting with the most features turned on at once,
  579. * and ending with the smallest set of features. This way large combinations
  580. * can be allocated if they're turned on, and smaller combinations are the
  581. * fallthrough conditions.
  582. *
  583. **/
  584. static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
  585. {
  586. /* Start with base case */
  587. adapter->num_rx_queues = 1;
  588. adapter->num_tx_queues = 1;
  589. adapter->num_xdp_queues = 0;
  590. adapter->num_rx_pools = 1;
  591. adapter->num_rx_queues_per_pool = 1;
  592. #ifdef CONFIG_IXGBE_DCB
  593. if (ixgbe_set_dcb_sriov_queues(adapter))
  594. return;
  595. if (ixgbe_set_dcb_queues(adapter))
  596. return;
  597. #endif
  598. if (ixgbe_set_sriov_queues(adapter))
  599. return;
  600. ixgbe_set_rss_queues(adapter);
  601. }
  602. /**
  603. * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
  604. * @adapter: board private structure
  605. *
  606. * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
  607. * return a negative error code if unable to acquire MSI-X vectors for any
  608. * reason.
  609. */
  610. static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
  611. {
  612. struct ixgbe_hw *hw = &adapter->hw;
  613. int i, vectors, vector_threshold;
  614. /* We start by asking for one vector per queue pair with XDP queues
  615. * being stacked with TX queues.
  616. */
  617. vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
  618. vectors = max(vectors, adapter->num_xdp_queues);
  619. /* It is easy to be greedy for MSI-X vectors. However, it really
  620. * doesn't do much good if we have a lot more vectors than CPUs. We'll
  621. * be somewhat conservative and only ask for (roughly) the same number
  622. * of vectors as there are CPUs.
  623. */
  624. vectors = min_t(int, vectors, num_online_cpus());
  625. /* Some vectors are necessary for non-queue interrupts */
  626. vectors += NON_Q_VECTORS;
  627. /* Hardware can only support a maximum of hw.mac->max_msix_vectors.
  628. * With features such as RSS and VMDq, we can easily surpass the
  629. * number of Rx and Tx descriptor queues supported by our device.
  630. * Thus, we cap the maximum in the rare cases where the CPU count also
  631. * exceeds our vector limit.
  632. */
  633. vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
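/* Example: 16 Rx/Tx queue pairs on an 8-CPU system request
 * min(16, 8) + NON_Q_VECTORS vectors, further capped by the MSI-X limit
 * the MAC reports.
 */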
  634. /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
  635. * handler, and (2) an Other (Link Status Change, etc.) handler.
  636. */
  637. vector_threshold = MIN_MSIX_COUNT;
  638. adapter->msix_entries = kcalloc(vectors,
  639. sizeof(struct msix_entry),
  640. GFP_KERNEL);
  641. if (!adapter->msix_entries)
  642. return -ENOMEM;
  643. for (i = 0; i < vectors; i++)
  644. adapter->msix_entries[i].entry = i;
  645. vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
  646. vector_threshold, vectors);
  647. if (vectors < 0) {
  648. /* A negative count of allocated vectors indicates an error in
  649. * acquiring within the specified range of MSI-X vectors
  650. */
  651. e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
  652. vectors);
  653. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  654. kfree(adapter->msix_entries);
  655. adapter->msix_entries = NULL;
  656. return vectors;
  657. }
  658. /* we successfully allocated some number of vectors within our
  659. * requested range.
  660. */
  661. adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
  662. /* Adjust for only the vectors we'll use, which is the minimum
  663. * of max_q_vectors and the number of vectors we were allocated.
  664. */
  665. vectors -= NON_Q_VECTORS;
  666. adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
  667. return 0;
  668. }
  669. static void ixgbe_add_ring(struct ixgbe_ring *ring,
  670. struct ixgbe_ring_container *head)
  671. {
  672. ring->next = head->ring;
  673. head->ring = ring;
  674. head->count++;
  675. head->next_update = jiffies + 1;
  676. }
  677. /**
  678. * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
  679. * @adapter: board private structure to initialize
  680. * @v_count: q_vectors allocated on adapter, used for ring interleaving
  681. * @v_idx: index of vector in adapter struct
  682. * @txr_count: total number of Tx rings to allocate
  683. * @txr_idx: index of first Tx ring to allocate
  684. * @xdp_count: total number of XDP rings to allocate
  685. * @xdp_idx: index of first XDP ring to allocate
  686. * @rxr_count: total number of Rx rings to allocate
  687. * @rxr_idx: index of first Rx ring to allocate
  688. *
  689. * We allocate one q_vector. If allocation fails we return -ENOMEM.
  690. **/
  691. static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
  692. int v_count, int v_idx,
  693. int txr_count, int txr_idx,
  694. int xdp_count, int xdp_idx,
  695. int rxr_count, int rxr_idx)
  696. {
  697. struct ixgbe_q_vector *q_vector;
  698. struct ixgbe_ring *ring;
  699. int node = NUMA_NO_NODE;
  700. int cpu = -1;
  701. int ring_count, size;
  702. u8 tcs = adapter->hw_tcs;
  703. ring_count = txr_count + rxr_count + xdp_count;
  704. size = sizeof(struct ixgbe_q_vector) +
  705. (sizeof(struct ixgbe_ring) * ring_count);
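/* The q_vector and all of its Tx, XDP and Rx rings come from one
 * allocation; the ring pointer used below simply walks the array that
 * follows the q_vector structure.
 */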
  706. /* customize cpu for Flow Director mapping */
  707. if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
  708. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
  709. if (rss_i > 1 && adapter->atr_sample_rate) {
  710. if (cpu_online(v_idx)) {
  711. cpu = v_idx;
  712. node = cpu_to_node(cpu);
  713. }
  714. }
  715. }
  716. /* allocate q_vector and rings */
  717. q_vector = kzalloc_node(size, GFP_KERNEL, node);
  718. if (!q_vector)
  719. q_vector = kzalloc(size, GFP_KERNEL);
  720. if (!q_vector)
  721. return -ENOMEM;
  722. /* setup affinity mask and node */
  723. if (cpu != -1)
  724. cpumask_set_cpu(cpu, &q_vector->affinity_mask);
  725. q_vector->numa_node = node;
  726. #ifdef CONFIG_IXGBE_DCA
  727. /* initialize CPU for DCA */
  728. q_vector->cpu = -1;
  729. #endif
  730. /* initialize NAPI */
  731. netif_napi_add(adapter->netdev, &q_vector->napi,
  732. ixgbe_poll, 64);
  733. /* tie q_vector and adapter together */
  734. adapter->q_vector[v_idx] = q_vector;
  735. q_vector->adapter = adapter;
  736. q_vector->v_idx = v_idx;
  737. /* initialize work limits */
  738. q_vector->tx.work_limit = adapter->tx_work_limit;
  739. /* Initialize setting for adaptive ITR */
  740. q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
  741. IXGBE_ITR_ADAPTIVE_LATENCY;
  742. q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
  743. IXGBE_ITR_ADAPTIVE_LATENCY;
  744. /* initialize ITR */
  745. if (txr_count && !rxr_count) {
  746. /* tx only vector */
  747. if (adapter->tx_itr_setting == 1)
  748. q_vector->itr = IXGBE_12K_ITR;
  749. else
  750. q_vector->itr = adapter->tx_itr_setting;
  751. } else {
  752. /* rx or rx/tx vector */
  753. if (adapter->rx_itr_setting == 1)
  754. q_vector->itr = IXGBE_20K_ITR;
  755. else
  756. q_vector->itr = adapter->rx_itr_setting;
  757. }
  758. /* initialize pointer to rings */
  759. ring = q_vector->ring;
  760. while (txr_count) {
  761. /* assign generic ring traits */
  762. ring->dev = &adapter->pdev->dev;
  763. ring->netdev = adapter->netdev;
  764. /* configure backlink on ring */
  765. ring->q_vector = q_vector;
  766. /* update q_vector Tx values */
  767. ixgbe_add_ring(ring, &q_vector->tx);
  768. /* apply Tx specific ring traits */
  769. ring->count = adapter->tx_ring_count;
  770. ring->queue_index = txr_idx;
  771. /* assign ring to adapter */
  772. adapter->tx_ring[txr_idx] = ring;
  773. /* update count and index */
  774. txr_count--;
  775. txr_idx += v_count;
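/* stepping the index by v_count interleaves the Tx rings across all q_vectors */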
  776. /* push pointer to next ring */
  777. ring++;
  778. }
  779. while (xdp_count) {
  780. /* assign generic ring traits */
  781. ring->dev = &adapter->pdev->dev;
  782. ring->netdev = adapter->netdev;
  783. /* configure backlink on ring */
  784. ring->q_vector = q_vector;
  785. /* update q_vector Tx values */
  786. ixgbe_add_ring(ring, &q_vector->tx);
  787. /* apply Tx specific ring traits */
  788. ring->count = adapter->tx_ring_count;
  789. ring->queue_index = xdp_idx;
  790. set_ring_xdp(ring);
  791. /* assign ring to adapter */
  792. adapter->xdp_ring[xdp_idx] = ring;
  793. /* update count and index */
  794. xdp_count--;
  795. xdp_idx++;
  796. /* push pointer to next ring */
  797. ring++;
  798. }
  799. while (rxr_count) {
  800. /* assign generic ring traits */
  801. ring->dev = &adapter->pdev->dev;
  802. ring->netdev = adapter->netdev;
  803. /* configure backlink on ring */
  804. ring->q_vector = q_vector;
  805. /* update q_vector Rx values */
  806. ixgbe_add_ring(ring, &q_vector->rx);
  807. /*
  808. * 82599 errata, UDP frames with a 0 checksum
  809. * can be marked as checksum errors.
  810. */
  811. if (adapter->hw.mac.type == ixgbe_mac_82599EB)
  812. set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
  813. #ifdef IXGBE_FCOE
  814. if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
  815. struct ixgbe_ring_feature *f;
  816. f = &adapter->ring_feature[RING_F_FCOE];
  817. if ((rxr_idx >= f->offset) &&
  818. (rxr_idx < f->offset + f->indices))
  819. set_bit(__IXGBE_RX_FCOE, &ring->state);
  820. }
  821. #endif /* IXGBE_FCOE */
  822. /* apply Rx specific ring traits */
  823. ring->count = adapter->rx_ring_count;
  824. ring->queue_index = rxr_idx;
  825. /* assign ring to adapter */
  826. adapter->rx_ring[rxr_idx] = ring;
  827. /* update count and index */
  828. rxr_count--;
  829. rxr_idx += v_count;
  830. /* push pointer to next ring */
  831. ring++;
  832. }
  833. return 0;
  834. }
  835. /**
  836. * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
  837. * @adapter: board private structure to initialize
  838. * @v_idx: Index of vector to be freed
  839. *
  840. * This function frees the memory allocated to the q_vector. In addition if
  841. * NAPI is enabled it will delete any references to the NAPI struct prior
  842. * to freeing the q_vector.
  843. **/
  844. static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
  845. {
  846. struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
  847. struct ixgbe_ring *ring;
  848. ixgbe_for_each_ring(ring, q_vector->tx) {
  849. if (ring_is_xdp(ring))
  850. adapter->xdp_ring[ring->queue_index] = NULL;
  851. else
  852. adapter->tx_ring[ring->queue_index] = NULL;
  853. }
  854. ixgbe_for_each_ring(ring, q_vector->rx)
  855. adapter->rx_ring[ring->queue_index] = NULL;
  856. adapter->q_vector[v_idx] = NULL;
  857. napi_hash_del(&q_vector->napi);
  858. netif_napi_del(&q_vector->napi);
  859. /*
  860. * ixgbe_get_stats64() might access the rings on this vector,
  861. * we must wait a grace period before freeing it.
  862. */
  863. kfree_rcu(q_vector, rcu);
  864. }
  865. /**
  866. * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
  867. * @adapter: board private structure to initialize
  868. *
  869. * We allocate one q_vector per queue interrupt. If allocation fails we
  870. * return -ENOMEM.
  871. **/
  872. static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
  873. {
  874. int q_vectors = adapter->num_q_vectors;
  875. int rxr_remaining = adapter->num_rx_queues;
  876. int txr_remaining = adapter->num_tx_queues;
  877. int xdp_remaining = adapter->num_xdp_queues;
  878. int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
  879. int err;
  880. /* only one q_vector if MSI-X is disabled. */
  881. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  882. q_vectors = 1;
  883. if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
  884. for (; rxr_remaining; v_idx++) {
  885. err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
  886. 0, 0, 0, 0, 1, rxr_idx);
  887. if (err)
  888. goto err_out;
  889. /* update counts and index */
  890. rxr_remaining--;
  891. rxr_idx++;
  892. }
  893. }
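/* spread the remaining Tx, XDP and Rx rings as evenly as possible across the remaining vectors */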
  894. for (; v_idx < q_vectors; v_idx++) {
  895. int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
  896. int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
  897. int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
  898. err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
  899. tqpv, txr_idx,
  900. xqpv, xdp_idx,
  901. rqpv, rxr_idx);
  902. if (err)
  903. goto err_out;
  904. /* update counts and index */
  905. rxr_remaining -= rqpv;
  906. txr_remaining -= tqpv;
  907. xdp_remaining -= xqpv;
  908. rxr_idx++;
  909. txr_idx++;
  910. xdp_idx += xqpv;
  911. }
  912. return 0;
  913. err_out:
  914. adapter->num_tx_queues = 0;
  915. adapter->num_xdp_queues = 0;
  916. adapter->num_rx_queues = 0;
  917. adapter->num_q_vectors = 0;
  918. while (v_idx--)
  919. ixgbe_free_q_vector(adapter, v_idx);
  920. return -ENOMEM;
  921. }
  922. /**
  923. * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
  924. * @adapter: board private structure to initialize
  925. *
  926. * This function frees the memory allocated to the q_vectors. In addition if
  927. * NAPI is enabled it will delete any references to the NAPI struct prior
  928. * to freeing the q_vector.
  929. **/
  930. static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
  931. {
  932. int v_idx = adapter->num_q_vectors;
  933. adapter->num_tx_queues = 0;
  934. adapter->num_xdp_queues = 0;
  935. adapter->num_rx_queues = 0;
  936. adapter->num_q_vectors = 0;
  937. while (v_idx--)
  938. ixgbe_free_q_vector(adapter, v_idx);
  939. }
  940. static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
  941. {
  942. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  943. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  944. pci_disable_msix(adapter->pdev);
  945. kfree(adapter->msix_entries);
  946. adapter->msix_entries = NULL;
  947. } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
  948. adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
  949. pci_disable_msi(adapter->pdev);
  950. }
  951. }
  952. /**
  953. * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
  954. * @adapter: board private structure to initialize
  955. *
  956. * Attempt to configure the interrupts using the best available
  957. * capabilities of the hardware and the kernel.
  958. **/
  959. static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
  960. {
  961. int err;
  962. /* We will try to get MSI-X interrupts first */
  963. if (!ixgbe_acquire_msix_vectors(adapter))
  964. return;
  965. /* At this point, we do not have MSI-X capabilities. We need to
  966. * reconfigure or disable various features which require MSI-X
  967. * capability.
  968. */
  969. /* Disable DCB unless we only have a single traffic class */
  970. if (adapter->hw_tcs > 1) {
  971. e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
  972. netdev_reset_tc(adapter->netdev);
  973. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  974. adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
  975. adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
  976. adapter->temp_dcb_cfg.pfc_mode_enable = false;
  977. adapter->dcb_cfg.pfc_mode_enable = false;
  978. }
  979. adapter->hw_tcs = 0;
  980. adapter->dcb_cfg.num_tcs.pg_tcs = 1;
  981. adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
  982. /* Disable SR-IOV support */
  983. e_dev_warn("Disabling SR-IOV support\n");
  984. ixgbe_disable_sriov(adapter);
  985. /* Disable RSS */
  986. e_dev_warn("Disabling RSS support\n");
  987. adapter->ring_feature[RING_F_RSS].limit = 1;
  988. /* recalculate number of queues now that many features have been
  989. * changed or disabled.
  990. */
  991. ixgbe_set_num_queues(adapter);
  992. adapter->num_q_vectors = 1;
  993. err = pci_enable_msi(adapter->pdev);
  994. if (err)
  995. e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
  996. err);
  997. else
  998. adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
  999. }
  1000. /**
  1001. * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
  1002. * @adapter: board private structure to initialize
  1003. *
  1004. * We determine which interrupt scheme to use based on...
  1005. * - Kernel support (MSI, MSI-X)
  1006. * - which can be user-defined (via MODULE_PARAM)
  1007. * - Hardware queue count (num_*_queues)
  1008. * - defined by miscellaneous hardware support/features (RSS, etc.)
  1009. **/
  1010. int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
  1011. {
  1012. int err;
  1013. /* Number of supported queues */
  1014. ixgbe_set_num_queues(adapter);
  1015. /* Set interrupt mode */
  1016. ixgbe_set_interrupt_capability(adapter);
  1017. err = ixgbe_alloc_q_vectors(adapter);
  1018. if (err) {
  1019. e_dev_err("Unable to allocate memory for queue vectors\n");
  1020. goto err_alloc_q_vectors;
  1021. }
  1022. ixgbe_cache_ring_register(adapter);
  1023. e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
  1024. (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
  1025. adapter->num_rx_queues, adapter->num_tx_queues,
  1026. adapter->num_xdp_queues);
  1027. set_bit(__IXGBE_DOWN, &adapter->state);
  1028. return 0;
  1029. err_alloc_q_vectors:
  1030. ixgbe_reset_interrupt_capability(adapter);
  1031. return err;
  1032. }
  1033. /**
  1034. * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  1035. * @adapter: board private structure to clear interrupt scheme on
  1036. *
  1037. * We go through and clear interrupt specific resources and reset the structure
  1038. * to pre-load conditions
  1039. **/
  1040. void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
  1041. {
  1042. adapter->num_tx_queues = 0;
  1043. adapter->num_xdp_queues = 0;
  1044. adapter->num_rx_queues = 0;
  1045. ixgbe_free_q_vectors(adapter);
  1046. ixgbe_reset_interrupt_capability(adapter);
  1047. }
  1048. void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
  1049. u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
  1050. {
  1051. struct ixgbe_adv_tx_context_desc *context_desc;
  1052. u16 i = tx_ring->next_to_use;
  1053. context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
  1054. i++;
  1055. tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  1056. /* set bits to identify this as an advanced context descriptor */
  1057. type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
  1058. context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
  1059. context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
  1060. context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
  1061. context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
  1062. }