// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>

#include "igc.h"
#include "igc_hw.h"

#define DRV_VERSION	"0.0.1-k"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
char igc_driver_version[] = DRV_VERSION;
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
    "Copyright(c) 2018 Intel Corporation.";

static const struct pci_device_id igc_pci_tbl[] = {
    { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM) },
    { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V) },
    /* required last entry */
    {0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

/* forward declaration */
static int igc_sw_init(struct igc_adapter *);
static void igc_configure(struct igc_adapter *adapter);
static void igc_power_down_link(struct igc_adapter *adapter);
static void igc_set_default_mac_filter(struct igc_adapter *adapter);
static void igc_write_itr(struct igc_q_vector *q_vector);
static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
                                         bool msix);
static void igc_free_q_vectors(struct igc_adapter *adapter);
static void igc_irq_disable(struct igc_adapter *adapter);
static void igc_irq_enable(struct igc_adapter *adapter);
static void igc_configure_msix(struct igc_adapter *adapter);

enum latency_range {
    lowest_latency = 0,
    low_latency = 1,
    bulk_latency = 2,
    latency_invalid = 255
};

static void igc_reset(struct igc_adapter *adapter)
{
    if (!netif_running(adapter->netdev))
        igc_power_down_link(adapter);
}

/**
 * igc_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
}

/**
 * igc_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igc_power_down_link(struct igc_adapter *adapter)
{
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;
    u32 ctrl_ext;

    /* Let firmware take over control of h/w */
    ctrl_ext = rd32(IGC_CTRL_EXT);
    wr32(IGC_CTRL_EXT,
         ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;
    u32 ctrl_ext;

    /* Let firmware know the driver has taken over */
    ctrl_ext = rd32(IGC_CTRL_EXT);
    wr32(IGC_CTRL_EXT,
         ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
    struct igc_adapter *adapter = netdev_priv(netdev);
    struct igc_hw *hw = &adapter->hw;
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
    memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

    /* set the correct pool for the new PF MAC address in entry 0 */
    igc_set_default_mac_filter(adapter);

    return 0;
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
                                  struct net_device *netdev)
{
    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;
}

/**
 * igc_ioctl - I/O control method
 * @netdev: network interface device structure
 * @ifr: pointer to interface request structure
 * @cmd: ioctl command
 */
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
    switch (cmd) {
    default:
        return -EOPNOTSUPP;
    }
}

/**
 * igc_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 */
static void igc_up(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;
    int i = 0;

    /* hardware has been reset, we need to reload some things */
    igc_configure(adapter);

    clear_bit(__IGC_DOWN, &adapter->state);

    for (i = 0; i < adapter->num_q_vectors; i++)
        napi_enable(&adapter->q_vector[i]->napi);

    if (adapter->msix_entries)
        igc_configure_msix(adapter);
    else
        igc_assign_vector(adapter->q_vector[0], 0);

    /* Clear any pending interrupts. */
    rd32(IGC_ICR);
    igc_irq_enable(adapter);
}

/**
 * igc_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */
static void igc_update_stats(struct igc_adapter *adapter)
{
}

/**
 * igc_down - Close the interface
 * @adapter: board private structure
 */
static void igc_down(struct igc_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int i = 0;

    set_bit(__IGC_DOWN, &adapter->state);

    /* set trans_start so we don't get spurious watchdogs during reset */
    netif_trans_update(netdev);

    netif_carrier_off(netdev);
    netif_tx_stop_all_queues(netdev);

    for (i = 0; i < adapter->num_q_vectors; i++)
        napi_disable(&adapter->q_vector[i]->napi);

    adapter->link_speed = 0;
    adapter->link_duplex = 0;
}

/**
 * igc_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
    int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
    struct igc_adapter *adapter = netdev_priv(netdev);
    struct pci_dev *pdev = adapter->pdev;

    /* adjust max frame to be at least the size of a standard frame */
    if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
        max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

    while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
        usleep_range(1000, 2000);

    /* igc_down has a dependency on max_frame_size */
    adapter->max_frame_size = max_frame;

    if (netif_running(netdev))
        igc_down(adapter);

    dev_info(&pdev->dev, "changing MTU from %d to %d\n",
             netdev->mtu, new_mtu);
    netdev->mtu = new_mtu;

    if (netif_running(netdev))
        igc_up(adapter);
    else
        igc_reset(adapter);

    clear_bit(__IGC_RESETTING, &adapter->state);

    return 0;
}

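/* Worked example (illustration only, not part of the driver logic): for
 * the default MTU of 1500, max_frame = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes, which already exceeds
 * the standard-frame floor of ETH_FRAME_LEN + ETH_FCS_LEN = 1518.
 */
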
/**
 * igc_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are updated here and also from the timer callback.
 */
static struct net_device_stats *igc_get_stats(struct net_device *netdev)
{
    struct igc_adapter *adapter = netdev_priv(netdev);

    if (!test_bit(__IGC_RESETTING, &adapter->state))
        igc_update_stats(adapter);

    /* only return the current stats */
    return &netdev->stats;
}

/**
 * igc_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 */
static void igc_configure(struct igc_adapter *adapter)
{
    igc_get_hw_control(adapter);
}

/**
 * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: Pointer to adapter structure
 * @index: Index of the RAR entry which needs to be synced with MAC table
 */
static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
{
    u8 *addr = adapter->mac_table[index].addr;
    struct igc_hw *hw = &adapter->hw;
    u32 rar_low, rar_high;

    /* HW expects these to be in network order when they are plugged
     * into the registers which are little endian. In order to guarantee
     * that ordering we need to do an leXX_to_cpup here in order to be
     * ready for the byteswap that occurs with writel
     */
    rar_low = le32_to_cpup((__le32 *)(addr));
    rar_high = le16_to_cpup((__le16 *)(addr + 4));

    /* Indicate to hardware the Address is Valid. */
    if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
        if (is_valid_ether_addr(addr))
            rar_high |= IGC_RAH_AV;

        rar_high |= IGC_RAH_POOL_1 <<
            adapter->mac_table[index].queue;
    }

    wr32(IGC_RAL(index), rar_low);
    wrfl();
    wr32(IGC_RAH(index), rar_high);
    wrfl();
}

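/* Byte-ordering example (illustration only): for the address
 * 00:11:22:33:44:55, the first four octets yield rar_low = 0x33221100
 * and the remaining two yield rar_high = 0x5544 (before the valid/pool
 * bits are OR'd in); writel() then performs any byteswap needed so the
 * octets land in RAL/RAH in network order.
 */
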
/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
    struct igc_mac_addr *mac_table = &adapter->mac_table[0];

    ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
    mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;

    igc_rar_set_index(adapter, 0);
}

/**
 * igc_msix_other - msix other interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t igc_msix_other(int irq, void *data)
{
    struct igc_adapter *adapter = data;
    struct igc_hw *hw = &adapter->hw;
    u32 icr = rd32(IGC_ICR);

    /* reading ICR causes bit 31 of EICR to be cleared */
    if (icr & IGC_ICR_DRSTA)
        schedule_work(&adapter->reset_task);

    if (icr & IGC_ICR_DOUTSYNC) {
        /* HW is reporting DMA is out of sync */
        adapter->stats.doosync++;
    }

    if (icr & IGC_ICR_LSC) {
        hw->mac.get_link_status = 1;
        /* guard against interrupt when we're going down */
        if (!test_bit(__IGC_DOWN, &adapter->state))
            mod_timer(&adapter->watchdog_timer, jiffies + 1);
    }

    wr32(IGC_EIMS, adapter->eims_other);

    return IRQ_HANDLED;
}

/**
 * igc_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 */
static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
                           int index, int offset)
{
    u32 ivar = array_rd32(IGC_IVAR0, index);

    /* clear any bits that are currently set */
    ivar &= ~((u32)0xFF << offset);

    /* write vector and valid bit */
    ivar |= (msix_vector | IGC_IVAR_VALID) << offset;

    array_wr32(IGC_IVAR0, index, ivar);
}

static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
{
    struct igc_adapter *adapter = q_vector->adapter;
    struct igc_hw *hw = &adapter->hw;
    int rx_queue = IGC_N0_QUEUE;
    int tx_queue = IGC_N0_QUEUE;

    if (q_vector->rx.ring)
        rx_queue = q_vector->rx.ring->reg_idx;
    if (q_vector->tx.ring)
        tx_queue = q_vector->tx.ring->reg_idx;

    switch (hw->mac.type) {
    case igc_i225:
        if (rx_queue > IGC_N0_QUEUE)
            igc_write_ivar(hw, msix_vector,
                           rx_queue >> 1,
                           (rx_queue & 0x1) << 4);
        if (tx_queue > IGC_N0_QUEUE)
            igc_write_ivar(hw, msix_vector,
                           tx_queue >> 1,
                           ((tx_queue & 0x1) << 4) + 8);
        q_vector->eims_value = BIT(msix_vector);
        break;
    default:
        WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
        break;
    }

    /* add q_vector eims value to global eims_enable_mask */
    adapter->eims_enable_mask |= q_vector->eims_value;

    /* configure q_vector to set itr on first interrupt */
    q_vector->set_itr = 1;
}

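/* Mapping example (illustration only): with the i225 scheme above,
 * Rx queue 3 programs IVAR row 3 >> 1 = 1 at column offset
 * (3 & 0x1) << 4 = 16, while Tx queue 3 uses the same row at offset
 * 16 + 8 = 24; each 8-bit field holds the vector number OR'd with
 * IGC_IVAR_VALID.
 */
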
/**
 * igc_configure_msix - Configure MSI-X hardware
 * @adapter: Pointer to adapter structure
 *
 * igc_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 */
static void igc_configure_msix(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;
    int i, vector = 0;
    u32 tmp;

    adapter->eims_enable_mask = 0;

    /* set vector for other causes, i.e. link changes */
    switch (hw->mac.type) {
    case igc_i225:
        /* Turn on MSI-X capability first, or our settings
         * won't stick. And it will take days to debug.
         */
        wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
             IGC_GPIE_PBA | IGC_GPIE_EIAME |
             IGC_GPIE_NSICR);

        /* enable msix_other interrupt */
        adapter->eims_other = BIT(vector);
        tmp = (vector++ | IGC_IVAR_VALID) << 8;

        wr32(IGC_IVAR_MISC, tmp);
        break;
    default:
        /* do nothing, since nothing else supports MSI-X */
        break;
    } /* switch (hw->mac.type) */

    adapter->eims_enable_mask |= adapter->eims_other;

    for (i = 0; i < adapter->num_q_vectors; i++)
        igc_assign_vector(adapter->q_vector[i], vector++);

    wrfl();
}

static irqreturn_t igc_msix_ring(int irq, void *data)
{
    struct igc_q_vector *q_vector = data;

    /* Write the ITR value calculated from the previous interrupt. */
    igc_write_itr(q_vector);

    napi_schedule(&q_vector->napi);

    return IRQ_HANDLED;
}

/**
 * igc_request_msix - Initialize MSI-X interrupts
 * @adapter: Pointer to adapter structure
 *
 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 */
static int igc_request_msix(struct igc_adapter *adapter)
{
    int i = 0, err = 0, vector = 0, free_vector = 0;
    struct net_device *netdev = adapter->netdev;

    err = request_irq(adapter->msix_entries[vector].vector,
                      &igc_msix_other, 0, netdev->name, adapter);
    if (err)
        goto err_out;

    for (i = 0; i < adapter->num_q_vectors; i++) {
        struct igc_q_vector *q_vector = adapter->q_vector[i];

        vector++;

        q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);

        if (q_vector->rx.ring && q_vector->tx.ring)
            sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                    q_vector->rx.ring->queue_index);
        else if (q_vector->tx.ring)
            sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                    q_vector->tx.ring->queue_index);
        else if (q_vector->rx.ring)
            sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                    q_vector->rx.ring->queue_index);
        else
            sprintf(q_vector->name, "%s-unused", netdev->name);

        err = request_irq(adapter->msix_entries[vector].vector,
                          igc_msix_ring, 0, q_vector->name,
                          q_vector);
        if (err)
            goto err_free;
    }

    igc_configure_msix(adapter);
    return 0;

err_free:
    /* free already assigned IRQs */
    free_irq(adapter->msix_entries[free_vector++].vector, adapter);

    vector--;
    for (i = 0; i < vector; i++) {
        free_irq(adapter->msix_entries[free_vector++].vector,
                 adapter->q_vector[i]);
    }
err_out:
    return err;
}

/**
 * igc_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igc_free_q_vector.
 */
static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
{
    struct igc_q_vector *q_vector = adapter->q_vector[v_idx];

    /* if we're coming from igc_set_interrupt_capability, the vectors are
     * not yet allocated
     */
    if (!q_vector)
        return;

    if (q_vector->tx.ring)
        adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

    if (q_vector->rx.ring)
        adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

    netif_napi_del(&q_vector->napi);
}

static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
{
    int v_idx = adapter->num_q_vectors;

    if (adapter->msix_entries) {
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
    } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
        pci_disable_msi(adapter->pdev);
    }

    while (v_idx--)
        igc_reset_q_vector(adapter, v_idx);
}

/**
 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: Pointer to adapter structure
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
{
    igc_free_q_vectors(adapter);
    igc_reset_interrupt_capability(adapter);
}

/**
 * igc_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 */
static void igc_free_q_vectors(struct igc_adapter *adapter)
{
    int v_idx = adapter->num_q_vectors;

    adapter->num_tx_queues = 0;
    adapter->num_rx_queues = 0;
    adapter->num_q_vectors = 0;

    while (v_idx--) {
        igc_reset_q_vector(adapter, v_idx);
        igc_free_q_vector(adapter, v_idx);
    }
}

/**
 * igc_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 */
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
{
    struct igc_q_vector *q_vector = adapter->q_vector[v_idx];

    adapter->q_vector[v_idx] = NULL;

    /* igc_get_stats64() might access the rings on this vector,
     * we must wait a grace period before freeing it.
     */
    if (q_vector)
        kfree_rcu(q_vector, rcu);
}

/**
 * igc_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size. This
 * algorithm is less sophisticated than that used in igc_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 */
static void igc_update_ring_itr(struct igc_q_vector *q_vector)
{
    struct igc_adapter *adapter = q_vector->adapter;
    int new_val = q_vector->itr_val;
    int avg_wire_size = 0;
    unsigned int packets;

    /* For non-gigabit speeds, just fix the interrupt rate at 4000
     * ints/sec - ITR timer value of 120 ticks.
     */
    switch (adapter->link_speed) {
    case SPEED_10:
    case SPEED_100:
        new_val = IGC_4K_ITR;
        goto set_itr_val;
    default:
        break;
    }

    packets = q_vector->rx.total_packets;
    if (packets)
        avg_wire_size = q_vector->rx.total_bytes / packets;

    packets = q_vector->tx.total_packets;
    if (packets)
        avg_wire_size = max_t(u32, avg_wire_size,
                              q_vector->tx.total_bytes / packets);

    /* if avg_wire_size isn't set no work was done */
    if (!avg_wire_size)
        goto clear_counts;

    /* Add 24 bytes to size to account for CRC, preamble, and gap */
    avg_wire_size += 24;

    /* Don't starve jumbo frames */
    avg_wire_size = min(avg_wire_size, 3000);

    /* Give a little boost to mid-size frames */
    if (avg_wire_size > 300 && avg_wire_size < 1200)
        new_val = avg_wire_size / 3;
    else
        new_val = avg_wire_size / 2;

    /* conservative mode (itr 3) eliminates the lowest_latency setting */
    if (new_val < IGC_20K_ITR &&
        ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
         (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
        new_val = IGC_20K_ITR;

set_itr_val:
    if (new_val != q_vector->itr_val) {
        q_vector->itr_val = new_val;
        q_vector->set_itr = 1;
    }
clear_counts:
    q_vector->rx.total_bytes = 0;
    q_vector->rx.total_packets = 0;
    q_vector->tx.total_bytes = 0;
    q_vector->tx.total_packets = 0;
}

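/* Worked example (illustration only): a stream of 1500-byte frames gives
 * avg_wire_size = 1500 + 24 = 1524; that falls outside the 300..1200
 * mid-size window, so new_val = 1524 / 2 = 762 (a long interval, i.e. a
 * low interrupt rate suited to bulk traffic), whereas 100-byte frames
 * give (100 + 24) / 2 = 62 and therefore far more frequent interrupts.
 */
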
/**
 * igc_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 */
static void igc_update_itr(struct igc_q_vector *q_vector,
                           struct igc_ring_container *ring_container)
{
    unsigned int packets = ring_container->total_packets;
    unsigned int bytes = ring_container->total_bytes;
    u8 itrval = ring_container->itr;

    /* no packets, exit with status unchanged */
    if (packets == 0)
        return;

    switch (itrval) {
    case lowest_latency:
        /* handle TSO and jumbo frames */
        if (bytes / packets > 8000)
            itrval = bulk_latency;
        else if ((packets < 5) && (bytes > 512))
            itrval = low_latency;
        break;
    case low_latency:  /* 50 usec aka 20000 ints/s */
        if (bytes > 10000) {
            /* this if handles the TSO accounting */
            if (bytes / packets > 8000)
                itrval = bulk_latency;
            else if ((packets < 10) || ((bytes / packets) > 1200))
                itrval = bulk_latency;
            else if ((packets > 35))
                itrval = lowest_latency;
        } else if (bytes / packets > 2000) {
            itrval = bulk_latency;
        } else if (packets <= 2 && bytes < 512) {
            itrval = lowest_latency;
        }
        break;
    case bulk_latency:  /* 250 usec aka 4000 ints/s */
        if (bytes > 25000) {
            if (packets > 35)
                itrval = low_latency;
        } else if (bytes < 1500) {
            itrval = low_latency;
        }
        break;
    }

    /* clear work counters since we have the values we need */
    ring_container->total_bytes = 0;
    ring_container->total_packets = 0;

    /* write updated itr to ring container */
    ring_container->itr = itrval;
}

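/* Classification example (illustration only): 40 packets totalling
 * 60000 bytes while in low_latency means bytes > 10000 and
 * bytes / packets = 1500 > 1200, so the container is demoted to
 * bulk_latency; 2 packets totalling 400 bytes would instead promote it
 * to lowest_latency.
 */
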
static void igc_set_itr(struct igc_q_vector *q_vector)
{
    struct igc_adapter *adapter = q_vector->adapter;
    u32 new_itr = q_vector->itr_val;
    u8 current_itr = 0;

    /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
    switch (adapter->link_speed) {
    case SPEED_10:
    case SPEED_100:
        current_itr = 0;
        new_itr = IGC_4K_ITR;
        goto set_itr_now;
    default:
        break;
    }

    igc_update_itr(q_vector, &q_vector->tx);
    igc_update_itr(q_vector, &q_vector->rx);

    current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

    /* conservative mode (itr 3) eliminates the lowest_latency setting */
    if (current_itr == lowest_latency &&
        ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
         (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
        current_itr = low_latency;

    switch (current_itr) {
    /* counts and packets in update_itr are dependent on these numbers */
    case lowest_latency:
        new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
        break;
    case low_latency:
        new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
        break;
    case bulk_latency:
        new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
        break;
    default:
        break;
    }

set_itr_now:
    if (new_itr != q_vector->itr_val) {
        /* this attempts to bias the interrupt rate towards Bulk
         * by adding intermediate steps when interrupt rate is
         * increasing
         */
        new_itr = new_itr > q_vector->itr_val ?
                  max((new_itr * q_vector->itr_val) /
                      (new_itr + (q_vector->itr_val >> 2)),
                      new_itr) : new_itr;
        /* Don't write the value here; it resets the adapter's
         * internal timer, and causes us to delay far longer than
         * we should between interrupts. Instead, we write the ITR
         * value at the beginning of the next interrupt so the timing
         * ends up being correct.
         */
        q_vector->itr_val = new_itr;
        q_vector->set_itr = 1;
    }
}

static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
{
    struct igc_adapter *adapter = q_vector->adapter;
    struct igc_hw *hw = &adapter->hw;

    if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
        (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
        if (adapter->num_q_vectors == 1)
            igc_set_itr(q_vector);
        else
            igc_update_ring_itr(q_vector);
    }

    if (!test_bit(__IGC_DOWN, &adapter->state)) {
        if (adapter->msix_entries)
            wr32(IGC_EIMS, q_vector->eims_value);
        else
            igc_irq_enable(adapter);
    }
}

/**
 * igc_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */
static int igc_poll(struct napi_struct *napi, int budget)
{
    struct igc_q_vector *q_vector = container_of(napi,
                                                 struct igc_q_vector,
                                                 napi);
    bool clean_complete = true;
    int work_done = 0;
    int cleaned = 0;

    if (q_vector->rx.ring) {
        work_done += cleaned;

        if (cleaned >= budget)
            clean_complete = false;
    }

    /* If all work not completed, return budget and keep polling */
    if (!clean_complete)
        return budget;

    /* If not enough Rx work done, exit the polling mode */
    napi_complete_done(napi, work_done);
    igc_ring_irq_enable(q_vector);

    return 0;
}

/**
 * igc_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: Pointer to adapter structure
 * @msix: boolean indicating whether MSI-X should be attempted
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
                                         bool msix)
{
    int numvecs, i;
    int err;

    if (!msix)
        goto msi_only;
    adapter->flags |= IGC_FLAG_HAS_MSIX;

    /* Number of supported queues. */
    adapter->num_rx_queues = adapter->rss_queues;

    adapter->num_tx_queues = adapter->rss_queues;

    /* start with one vector for every Rx queue */
    numvecs = adapter->num_rx_queues;

    /* if Tx handler is separate add 1 for every Tx queue */
    if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
        numvecs += adapter->num_tx_queues;

    /* store the number of vectors reserved for queues */
    adapter->num_q_vectors = numvecs;

    /* add 1 vector for link status interrupts */
    numvecs++;

    adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                    GFP_KERNEL);
    if (!adapter->msix_entries)
        return;

    /* populate entry values */
    for (i = 0; i < numvecs; i++)
        adapter->msix_entries[i].entry = i;

    err = pci_enable_msix_range(adapter->pdev,
                                adapter->msix_entries,
                                numvecs,
                                numvecs);
    if (err > 0)
        return;

    kfree(adapter->msix_entries);
    adapter->msix_entries = NULL;

    igc_reset_interrupt_capability(adapter);

msi_only:
    adapter->flags &= ~IGC_FLAG_HAS_MSIX;

    adapter->rss_queues = 1;
    adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
    adapter->num_rx_queues = 1;
    adapter->num_tx_queues = 1;
    adapter->num_q_vectors = 1;
    if (!pci_enable_msi(adapter->pdev))
        adapter->flags |= IGC_FLAG_HAS_MSI;
}

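/* Sizing example (illustration only): with rss_queues = 4 and
 * IGC_FLAG_QUEUE_PAIRS set, Tx and Rx share vectors, so
 * numvecs = 4 queue vectors + 1 link-status vector = 5 MSI-X entries;
 * without queue pairing the same configuration would request 9.
 */
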
static void igc_add_ring(struct igc_ring *ring,
                         struct igc_ring_container *head)
{
    head->ring = ring;
    head->count++;
}

/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int igc_alloc_q_vector(struct igc_adapter *adapter,
                              unsigned int v_count, unsigned int v_idx,
                              unsigned int txr_count, unsigned int txr_idx,
                              unsigned int rxr_count, unsigned int rxr_idx)
{
    struct igc_q_vector *q_vector;
    struct igc_ring *ring;
    int ring_count, size;

    /* igc only supports 1 Tx and/or 1 Rx queue per vector */
    if (txr_count > 1 || rxr_count > 1)
        return -ENOMEM;

    ring_count = txr_count + rxr_count;
    size = sizeof(struct igc_q_vector) +
        (sizeof(struct igc_ring) * ring_count);

    /* allocate q_vector and rings */
    q_vector = adapter->q_vector[v_idx];
    if (!q_vector)
        q_vector = kzalloc(size, GFP_KERNEL);
    else
        memset(q_vector, 0, size);
    if (!q_vector)
        return -ENOMEM;

    /* initialize NAPI */
    netif_napi_add(adapter->netdev, &q_vector->napi,
                   igc_poll, 64);

    /* tie q_vector and adapter together */
    adapter->q_vector[v_idx] = q_vector;
    q_vector->adapter = adapter;

    /* initialize work limits */
    q_vector->tx.work_limit = adapter->tx_work_limit;

    /* initialize ITR configuration */
    q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
    q_vector->itr_val = IGC_START_ITR;

    /* initialize pointer to rings */
    ring = q_vector->ring;

    /* initialize ITR */
    if (rxr_count) {
        /* rx or rx/tx vector */
        if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
            q_vector->itr_val = adapter->rx_itr_setting;
    } else {
        /* tx only vector */
        if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
            q_vector->itr_val = adapter->tx_itr_setting;
    }

    if (txr_count) {
        /* assign generic ring traits */
        ring->dev = &adapter->pdev->dev;
        ring->netdev = adapter->netdev;

        /* configure backlink on ring */
        ring->q_vector = q_vector;

        /* update q_vector Tx values */
        igc_add_ring(ring, &q_vector->tx);

        /* apply Tx specific ring traits */
        ring->count = adapter->tx_ring_count;
        ring->queue_index = txr_idx;

        /* assign ring to adapter */
        adapter->tx_ring[txr_idx] = ring;

        /* push pointer to next ring */
        ring++;
    }

    if (rxr_count) {
        /* assign generic ring traits */
        ring->dev = &adapter->pdev->dev;
        ring->netdev = adapter->netdev;

        /* configure backlink on ring */
        ring->q_vector = q_vector;

        /* update q_vector Rx values */
        igc_add_ring(ring, &q_vector->rx);

        /* apply Rx specific ring traits */
        ring->count = adapter->rx_ring_count;
        ring->queue_index = rxr_idx;

        /* assign ring to adapter */
        adapter->rx_ring[rxr_idx] = ring;
    }

    return 0;
}

/**
 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int igc_alloc_q_vectors(struct igc_adapter *adapter)
{
    int rxr_remaining = adapter->num_rx_queues;
    int txr_remaining = adapter->num_tx_queues;
    int rxr_idx = 0, txr_idx = 0, v_idx = 0;
    int q_vectors = adapter->num_q_vectors;
    int err;

    if (q_vectors >= (rxr_remaining + txr_remaining)) {
        for (; rxr_remaining; v_idx++) {
            err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
                                     0, 0, 1, rxr_idx);
            if (err)
                goto err_out;

            /* update counts and index */
            rxr_remaining--;
            rxr_idx++;
        }
    }

    for (; v_idx < q_vectors; v_idx++) {
        int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
        int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

        err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
                                 tqpv, txr_idx, rqpv, rxr_idx);
        if (err)
            goto err_out;

        /* update counts and index */
        rxr_remaining -= rqpv;
        txr_remaining -= tqpv;
        rxr_idx++;
        txr_idx++;
    }

    return 0;

err_out:
    adapter->num_tx_queues = 0;
    adapter->num_rx_queues = 0;
    adapter->num_q_vectors = 0;

    while (v_idx--)
        igc_free_q_vector(adapter, v_idx);

    return -ENOMEM;
}

/**
 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: Pointer to adapter structure
 * @msix: boolean indicating whether MSI-X should be attempted
 *
 * This function initializes the interrupts and allocates all of the queues.
 */
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
    struct pci_dev *pdev = adapter->pdev;
    int err = 0;

    igc_set_interrupt_capability(adapter, msix);

    err = igc_alloc_q_vectors(adapter);
    if (err) {
        dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
        goto err_alloc_q_vectors;
    }

    return 0;

err_alloc_q_vectors:
    igc_reset_interrupt_capability(adapter);
    return err;
}

static void igc_free_irq(struct igc_adapter *adapter)
{
    if (adapter->msix_entries) {
        int vector = 0, i;

        free_irq(adapter->msix_entries[vector++].vector, adapter);

        for (i = 0; i < adapter->num_q_vectors; i++)
            free_irq(adapter->msix_entries[vector++].vector,
                     adapter->q_vector[i]);
    } else {
        free_irq(adapter->pdev->irq, adapter);
    }
}

/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void igc_irq_disable(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;

    if (adapter->msix_entries) {
        u32 regval = rd32(IGC_EIAM);

        wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
        wr32(IGC_EIMC, adapter->eims_enable_mask);
        regval = rd32(IGC_EIAC);
        wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
    }

    wr32(IGC_IAM, 0);
    wr32(IGC_IMC, ~0);
    wrfl();

    if (adapter->msix_entries) {
        int vector = 0, i;

        synchronize_irq(adapter->msix_entries[vector++].vector);

        for (i = 0; i < adapter->num_q_vectors; i++)
            synchronize_irq(adapter->msix_entries[vector++].vector);
    } else {
        synchronize_irq(adapter->pdev->irq);
    }
}

/**
 * igc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void igc_irq_enable(struct igc_adapter *adapter)
{
    struct igc_hw *hw = &adapter->hw;

    if (adapter->msix_entries) {
        u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
        u32 regval = rd32(IGC_EIAC);

        wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
        regval = rd32(IGC_EIAM);
        wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
        wr32(IGC_EIMS, adapter->eims_enable_mask);
        wr32(IGC_IMS, ims);
    } else {
        wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
        wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
    }
}

/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
    int err = 0;

    if (adapter->flags & IGC_FLAG_HAS_MSIX) {
        err = igc_request_msix(adapter);
        if (!err)
            goto request_done;
        /* fall back to MSI */
        igc_clear_interrupt_scheme(adapter);
        err = igc_init_interrupt_scheme(adapter, false);
        if (err)
            goto request_done;
        igc_configure(adapter);
    }

request_done:
    return err;
}

static void igc_write_itr(struct igc_q_vector *q_vector)
{
    u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;

    if (!q_vector->set_itr)
        return;

    if (!itr_val)
        itr_val = IGC_ITR_VAL_MASK;

    itr_val |= IGC_EITR_CNT_IGNR;

    writel(itr_val, q_vector->itr_register);
    q_vector->set_itr = 0;
}

/**
 * __igc_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: indicates whether we are in a resume call
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int __igc_open(struct net_device *netdev, bool resuming)
{
    struct igc_adapter *adapter = netdev_priv(netdev);
    struct igc_hw *hw = &adapter->hw;
    int err = 0;
    int i = 0;

    /* disallow open during test */
    if (test_bit(__IGC_TESTING, &adapter->state)) {
        WARN_ON(resuming);
        return -EBUSY;
    }

    netif_carrier_off(netdev);

    igc_power_up_link(adapter);

    igc_configure(adapter);

    err = igc_request_irq(adapter);
    if (err)
        goto err_req_irq;

    /* Notify the stack of the actual queue counts. */
    err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
    if (err)
        goto err_set_queues;

    err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
    if (err)
        goto err_set_queues;

    clear_bit(__IGC_DOWN, &adapter->state);

    for (i = 0; i < adapter->num_q_vectors; i++)
        napi_enable(&adapter->q_vector[i]->napi);

    /* Clear any pending interrupts. */
    rd32(IGC_ICR);
    igc_irq_enable(adapter);

    /* start the watchdog. */
    hw->mac.get_link_status = 1;

    return IGC_SUCCESS;

err_set_queues:
    igc_free_irq(adapter);
err_req_irq:
    igc_release_hw_control(adapter);
    igc_power_down_link(adapter);

    return err;
}

static int igc_open(struct net_device *netdev)
{
    return __igc_open(netdev, false);
}

/**
 * __igc_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: indicates whether we are in a suspend call
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int __igc_close(struct net_device *netdev, bool suspending)
{
    struct igc_adapter *adapter = netdev_priv(netdev);

    WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));

    igc_down(adapter);

    igc_release_hw_control(adapter);

    igc_free_irq(adapter);

    return 0;
}

static int igc_close(struct net_device *netdev)
{
    if (netif_device_present(netdev) || netdev->dismantle)
        return __igc_close(netdev, false);
    return 0;
}

static const struct net_device_ops igc_netdev_ops = {
    .ndo_open		= igc_open,
    .ndo_stop		= igc_close,
    .ndo_start_xmit		= igc_xmit_frame,
    .ndo_set_mac_address	= igc_set_mac,
    .ndo_change_mtu		= igc_change_mtu,
    .ndo_get_stats		= igc_get_stats,
    .ndo_do_ioctl		= igc_ioctl,
};

/* PCIe configuration access */
void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
    struct igc_adapter *adapter = hw->back;

    pci_read_config_word(adapter->pdev, reg, value);
}

void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
    struct igc_adapter *adapter = hw->back;

    pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
    struct igc_adapter *adapter = hw->back;
    u16 cap_offset;

    cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
    if (!cap_offset)
        return -IGC_ERR_CONFIG;

    pci_read_config_word(adapter->pdev, cap_offset + reg, value);

    return IGC_SUCCESS;
}

s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
    struct igc_adapter *adapter = hw->back;
    u16 cap_offset;

    cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
    if (!cap_offset)
        return -IGC_ERR_CONFIG;

    pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

    return IGC_SUCCESS;
}

u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
    struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
    u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
    u32 value = 0;

    if (IGC_REMOVED(hw_addr))
        return ~value;

    value = readl(&hw_addr[reg]);

    /* reads should not return all F's */
    if (!(~value) && (!reg || !(~readl(hw_addr)))) {
        struct net_device *netdev = igc->netdev;

        hw->hw_addr = NULL;
        netif_device_detach(netdev);
        netdev_err(netdev, "PCIe link lost, device now detached\n");
    }

    return value;
}

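/* Removal-detection example (illustration only): a hot-unplugged device
 * returns all-ones for every MMIO read, so a value of 0xFFFFFFFF
 * combined with an all-ones readback of register 0 is treated as a lost
 * PCIe link, and the netdev is detached rather than left polling dead
 * registers.
 */
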
/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
 */
static int igc_probe(struct pci_dev *pdev,
                     const struct pci_device_id *ent)
{
    struct igc_adapter *adapter;
    struct net_device *netdev;
    struct igc_hw *hw;
    int err, pci_using_dac;

    err = pci_enable_device_mem(pdev);
    if (err)
        return err;

    pci_using_dac = 0;
    err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
    if (!err) {
        err = dma_set_coherent_mask(&pdev->dev,
                                    DMA_BIT_MASK(64));
        if (!err)
            pci_using_dac = 1;
    } else {
        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
            err = dma_set_coherent_mask(&pdev->dev,
                                        DMA_BIT_MASK(32));
            if (err) {
                IGC_ERR("Wrong DMA configuration, aborting\n");
                goto err_dma;
            }
        }
    }

    err = pci_request_selected_regions(pdev,
                                       pci_select_bars(pdev,
                                                       IORESOURCE_MEM),
                                       igc_driver_name);
    if (err)
        goto err_pci_reg;

    pci_enable_pcie_error_reporting(pdev);

    pci_set_master(pdev);

    err = -ENOMEM;
    netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
                               IGC_MAX_TX_QUEUES);

    if (!netdev)
        goto err_alloc_etherdev;

    SET_NETDEV_DEV(netdev, &pdev->dev);

    pci_set_drvdata(pdev, netdev);
    adapter = netdev_priv(netdev);
    adapter->netdev = netdev;
    adapter->pdev = pdev;
    hw = &adapter->hw;
    hw->back = adapter;
    adapter->port_num = hw->bus.func;
    adapter->msg_enable = GENMASK(debug - 1, 0);

    err = pci_save_state(pdev);
    if (err)
        goto err_ioremap;

    err = -EIO;
    adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
                               pci_resource_len(pdev, 0));
    if (!adapter->io_addr)
        goto err_ioremap;

    /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
    hw->hw_addr = adapter->io_addr;

    netdev->netdev_ops = &igc_netdev_ops;

    netdev->watchdog_timeo = 5 * HZ;

    netdev->mem_start = pci_resource_start(pdev, 0);
    netdev->mem_end = pci_resource_end(pdev, 0);

    /* PCI config space info */
    hw->vendor_id = pdev->vendor;
    hw->device_id = pdev->device;
    hw->revision_id = pdev->revision;
    hw->subsystem_vendor_id = pdev->subsystem_vendor;
    hw->subsystem_device_id = pdev->subsystem_device;

    /* setup the private structure */
    err = igc_sw_init(adapter);
    if (err)
        goto err_sw_init;

    /* MTU range: 68 - 9216 */
    netdev->min_mtu = ETH_MIN_MTU;
    netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

    /* reset the hardware with the new settings */
    igc_reset(adapter);

    /* let the f/w know that the h/w is now under the control of the
     * driver.
     */
    igc_get_hw_control(adapter);

    strncpy(netdev->name, "eth%d", IFNAMSIZ);
    err = register_netdev(netdev);
    if (err)
        goto err_register;

    /* carrier off reporting is important to ethtool even BEFORE open */
    netif_carrier_off(netdev);

    /* print pcie link status and MAC address */
    pcie_print_link_status(pdev);
    netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

    return 0;

err_register:
    igc_release_hw_control(adapter);
err_sw_init:
    igc_clear_interrupt_scheme(adapter);
    iounmap(adapter->io_addr);
err_ioremap:
    free_netdev(netdev);
err_alloc_etherdev:
    pci_release_selected_regions(pdev,
                                 pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
    pci_disable_device(pdev);
    return err;
}

/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igc_adapter *adapter = netdev_priv(netdev);

    set_bit(__IGC_DOWN, &adapter->state);
    flush_scheduled_work();

    /* Release control of h/w to f/w. If f/w is AMT enabled, this
     * would have already happened in close and is redundant.
     */
    igc_release_hw_control(adapter);
    unregister_netdev(netdev);

    pci_release_selected_regions(pdev,
                                 pci_select_bars(pdev, IORESOURCE_MEM));

    free_netdev(netdev);

    pci_disable_device(pdev);
}

static struct pci_driver igc_driver = {
    .name     = igc_driver_name,
    .id_table = igc_pci_tbl,
    .probe    = igc_probe,
    .remove   = igc_remove,
};

/**
 * igc_sw_init - Initialize general software structures (struct igc_adapter)
 * @adapter: board private structure to initialize
 *
 * igc_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int igc_sw_init(struct igc_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    struct igc_hw *hw = &adapter->hw;

    /* PCI config space info */
    hw->vendor_id = pdev->vendor;
    hw->device_id = pdev->device;
    hw->subsystem_vendor_id = pdev->subsystem_vendor;
    hw->subsystem_device_id = pdev->subsystem_device;

    pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

    pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

    /* adjust max frame to be at least the size of a standard frame */
    adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
                              VLAN_HLEN;

    if (igc_init_interrupt_scheme(adapter, true)) {
        dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
        return -ENOMEM;
    }

    /* Explicitly disable IRQ since the NIC can be in any state. */
    igc_irq_disable(adapter);

    set_bit(__IGC_DOWN, &adapter->state);

    return 0;
}

/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
    int ret;

    pr_info("%s - version %s\n",
            igc_driver_string, igc_driver_version);

    pr_info("%s\n", igc_copyright);

    ret = pci_register_driver(&igc_driver);

    return ret;
}

module_init(igc_init_module);

/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
    pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);
/* igc_main.c */