qede_main.c

/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"
static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
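/* The raw 'debug' value is decoded into a dp_module/dp_level pair by
 * qede_config_debug() at probe time; see that function below for the
 * bit layout.
 */
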
static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);

	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);
static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;

	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}
static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_do_ioctl = qede_ioctl,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues, info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if device supports it declare it's enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts the 32b 'debug' parameter into a level and a module
 * bitmap. Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *	'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *	and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *	module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);
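
	/* Slow-path work runs under the qede lock for its whole duration, so
	 * it cannot race with driver state changes such as load/unload.
	 */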
	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	__qede_unlock(edev);
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	/* 64 rx + 64 tx + 64 XDP */
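	/* Each L2 queue needs a connection for its Rx ring, its Tx ring and
	 * its XDP Tx ring, hence the factor of three below.
	 */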
	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}
#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}
enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	if (is_vf)
		edev->flags |= QEDE_FLAG_IS_VF;

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev);
	if (rc)
		goto err3;

	/* Prepare the lock prior to the registration of the netdev,
	 * as once it's registered we might reach flows requiring it
	 * [it's even possible to reach a flow needing it directly
	 * from there, although it's unlikely].
	 */
	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err4;
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev, true);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev);
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}
enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev);
	unregister_netdev(ndev);
	cancel_delayed_work_sync(&edev->sp_task);

	qede_ptp_disable(edev);

	edev->ops->common->set_power_state(cdev, PCI_D0);
	pci_set_drvdata(pdev, NULL);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
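
	/* On a power-off shutdown there is no value in tearing the device
	 * further down, so skip the hardware remove and the netdev free.
	 */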
	if (system_state == SYSTEM_POWER_OFF)
		return;
	qed_ops->common->remove(cdev);

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	free_netdev(ndev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */
static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
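
	/* When an XDP program is attached, reserve XDP_PACKET_HEADROOM in
	 * front of the frame so the program has room to grow headers.
	 */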
	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page into multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring, NULL);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring, NULL);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real Tx ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    txq->num_tx_buffers,
					    sizeof(*p_virt),
					    &txq->tx_pbl, NULL);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX)
		qede_free_mem_txq(edev, fp->txq);
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one Rx queue and/or multiple per-TC Tx
 * queues).
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		rc = qede_alloc_mem_txq(edev, fp->txq);
		if (rc)
			goto out;
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
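			/* XDP_TX transmits straight out of the Rx buffers, so
			 * with XDP the pages must be DMA-mapped bidirectionally.
			 */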
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id) < 0);
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq->index = txq_index++;
			if (edev->dev_info.is_legacy)
				fp->txq->is_legacy = 1;
			fp->txq->dev = &edev->pdev->dev;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}
  1235. static int qede_set_real_num_queues(struct qede_dev *edev)
  1236. {
  1237. int rc = 0;
  1238. rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
  1239. if (rc) {
  1240. DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
  1241. return rc;
  1242. }
  1243. rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
  1244. if (rc) {
  1245. DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
  1246. return rc;
  1247. }
  1248. return 0;
  1249. }
static void qede_napi_disable_remove(struct qede_dev *edev)
{
        int i;

        for_each_queue(i) {
                napi_disable(&edev->fp_array[i].napi);
                netif_napi_del(&edev->fp_array[i].napi);
        }
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
        int i;

        /* Add NAPI objects */
        for_each_queue(i) {
                netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
                               qede_poll, NAPI_POLL_WEIGHT);
                napi_enable(&edev->fp_array[i].napi);
        }
}

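/* Quiesce and release fastpath interrupts. For MSI-X each used vector is
 * synchronized and freed; otherwise the simd handlers previously registered
 * with qed are cleaned instead.
 */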
static void qede_sync_free_irqs(struct qede_dev *edev)
{
        int i;

        for (i = 0; i < edev->int_info.used_cnt; i++) {
                if (edev->int_info.msix_cnt) {
                        synchronize_irq(edev->int_info.msix[i].vector);
                        free_irq(edev->int_info.msix[i].vector,
                                 &edev->fp_array[i]);
                } else {
                        edev->ops->common->simd_handler_clean(edev->cdev, i);
                }
        }

        edev->int_info.used_cnt = 0;
}

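/* Request one MSI-X vector per fastpath queue. When CONFIG_RFS_ACCEL is
 * enabled, each Rx-capable vector is also added to the netdev's CPU
 * reverse-map so accelerated RFS can steer flows to the right CPU.
 */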
static int qede_req_msix_irqs(struct qede_dev *edev)
{
        int i, rc;

        /* Sanitize number of interrupts == number of prepared RSS queues */
        if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
                DP_ERR(edev,
                       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
                       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
                return -EINVAL;
        }

        for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
                struct qede_fastpath *fp = &edev->fp_array[i];

                if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
                        rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
                                              edev->int_info.msix[i].vector);
                        if (rc) {
                                DP_ERR(edev, "Failed to add CPU rmap\n");
                                qede_free_arfs(edev);
                        }
                }
#endif
                rc = request_irq(edev->int_info.msix[i].vector,
                                 qede_msix_fp_int, 0, edev->fp_array[i].name,
                                 &edev->fp_array[i]);
                if (rc) {
                        DP_ERR(edev, "Request fp %d irq failed\n", i);
                        qede_sync_free_irqs(edev);
                        return rc;
                }
                DP_VERBOSE(edev, NETIF_MSG_INTR,
                           "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
                           edev->fp_array[i].name, i,
                           &edev->fp_array[i]);
                edev->int_info.used_cnt++;
        }

        return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
        struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

        napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
        int i, rc = 0;

        /* Learn Interrupt configuration */
        rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
        if (rc)
                return rc;

        if (edev->int_info.msix_cnt) {
                rc = qede_req_msix_irqs(edev);
                if (rc)
                        return rc;
                edev->ndev->irq = edev->int_info.msix[0].vector;
        } else {
                const struct qed_common_ops *ops;

                /* qed should learn the RSS ids and callbacks */
                ops = edev->ops->common;
                for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
                        ops->simd_handler_config(edev->cdev,
                                                 &edev->fp_array[i], i,
                                                 qede_simd_fp_handler);
                edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
        }

        return 0;
}

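/* Wait for a Tx queue to empty (sw_tx_cons catching up with sw_tx_prod).
 * Polls for up to ~1000 iterations of 1-2 ms each; if the queue is still
 * stuck and @allow_drain is set, the MCP is asked to drain and the wait is
 * retried once before giving up with -ENODEV.
 */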
static int qede_drain_txq(struct qede_dev *edev,
                          struct qede_tx_queue *txq, bool allow_drain)
{
        int rc, cnt = 1000;

        while (txq->sw_tx_cons != txq->sw_tx_prod) {
                if (!cnt) {
                        if (allow_drain) {
                                DP_NOTICE(edev,
                                          "Tx queue[%d] is stuck, requesting MCP to drain\n",
                                          txq->index);
                                rc = edev->ops->common->drain(edev->cdev);
                                if (rc)
                                        return rc;
                                return qede_drain_txq(edev, txq, false);
                        }
                        DP_NOTICE(edev,
                                  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
                                  txq->index, txq->sw_tx_prod,
                                  txq->sw_tx_cons);
                        return -ENODEV;
                }
                cnt--;
                usleep_range(1000, 2000);
                barrier();
        }

        /* FW finished processing, wait for HW to transmit all tx packets */
        usleep_range(1000, 2000);

        return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
                         struct qede_tx_queue *txq, int rss_id)
{
        return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

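/* Tear down the datapath: deactivate the vport via a vport-update, flush
 * all Tx/XDP queues (requesting an MCP drain if needed), stop the queues in
 * reverse order and finally stop the vport itself.
 */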
static int qede_stop_queues(struct qede_dev *edev)
{
        struct qed_update_vport_params *vport_update_params;
        struct qed_dev *cdev = edev->cdev;
        struct qede_fastpath *fp;
        int rc, i;

        /* Disable the vport */
        vport_update_params = vzalloc(sizeof(*vport_update_params));
        if (!vport_update_params)
                return -ENOMEM;

        vport_update_params->vport_id = 0;
        vport_update_params->update_vport_active_flg = 1;
        vport_update_params->vport_active_flg = 0;
        vport_update_params->update_rss_flg = 0;

        rc = edev->ops->vport_update(cdev, vport_update_params);
        vfree(vport_update_params);

        if (rc) {
                DP_ERR(edev, "Failed to update vport\n");
                return rc;
        }

        /* Flush Tx queues. If needed, request drain from MCP */
        for_each_queue(i) {
                fp = &edev->fp_array[i];

                if (fp->type & QEDE_FASTPATH_TX) {
                        rc = qede_drain_txq(edev, fp->txq, true);
                        if (rc)
                                return rc;
                }

                if (fp->type & QEDE_FASTPATH_XDP) {
                        rc = qede_drain_txq(edev, fp->xdp_tx, true);
                        if (rc)
                                return rc;
                }
        }

        /* Stop all Queues in reverse order */
        for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
                fp = &edev->fp_array[i];

                /* Stop the Tx Queue(s) */
                if (fp->type & QEDE_FASTPATH_TX) {
                        rc = qede_stop_txq(edev, fp->txq, i);
                        if (rc)
                                return rc;
                }

                /* Stop the Rx Queue */
                if (fp->type & QEDE_FASTPATH_RX) {
                        rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
                        if (rc) {
                                DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
                                return rc;
                        }
                }

                /* Stop the XDP forwarding queue */
                if (fp->type & QEDE_FASTPATH_XDP) {
                        rc = qede_stop_txq(edev, fp->xdp_tx, i);
                        if (rc)
                                return rc;

                        bpf_prog_put(fp->rxq->xdp_prog);
                }
        }

        /* Stop the vport */
        rc = edev->ops->vport_stop(cdev, 0);
        if (rc)
                DP_ERR(edev, "Failed to stop VPORT\n");

        return rc;
}

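/* Start a single Tx (or XDP-Tx) queue in the device: issue q_tx_start with
 * the queue's PBL, record the returned doorbell address and handle, latch
 * the firmware consumer pointer from the status block and pre-build the
 * static portion of the doorbell data.
 */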
static int qede_start_txq(struct qede_dev *edev,
                          struct qede_fastpath *fp,
                          struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
        dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
        u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
        struct qed_queue_start_common_params params;
        struct qed_txq_start_ret_params ret_params;
        int rc;

        memset(&params, 0, sizeof(params));
        memset(&ret_params, 0, sizeof(ret_params));

        /* Let the XDP queue share the queue-zone with one of the regular txq.
         * We don't really care about its coalescing.
         */
        if (txq->is_xdp)
                params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
        else
                params.queue_id = txq->index;

        params.p_sb = fp->sb_info;
        params.sb_idx = sb_idx;

        rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
                                   page_cnt, &ret_params);
        if (rc) {
                DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
                return rc;
        }

        txq->doorbell_addr = ret_params.p_doorbell;
        txq->handle = ret_params.p_handle;

        /* Determine the FW consumer address associated */
        txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

        /* Prepare the doorbell parameters */
        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_ETH_TX_BD_PROD_CMD);
        txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

        return rc;
}

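/* Bring the datapath up: start the vport, then every Rx, XDP and Tx queue,
 * and finally send a vport-update that activates the vport, enables
 * Tx-switching where applicable and programs the RSS parameters.
 */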
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
        int vlan_removal_en = 1;
        struct qed_dev *cdev = edev->cdev;
        struct qed_dev_info *qed_info = &edev->dev_info.common;
        struct qed_update_vport_params *vport_update_params;
        struct qed_queue_start_common_params q_params;
        struct qed_start_vport_params start = {0};
        int rc, i;

        if (!edev->num_queues) {
                DP_ERR(edev,
                       "Cannot update V-VPORT as active as there are no Rx queues\n");
                return -EINVAL;
        }

        vport_update_params = vzalloc(sizeof(*vport_update_params));
        if (!vport_update_params)
                return -ENOMEM;

        start.handle_ptp_pkts = !!(edev->ptp);
        start.gro_enable = !edev->gro_disable;
        start.mtu = edev->ndev->mtu;
        start.vport_id = 0;
        start.drop_ttl0 = true;
        start.remove_inner_vlan = vlan_removal_en;
        start.clear_stats = clear_stats;

        rc = edev->ops->vport_start(cdev, &start);
        if (rc) {
                DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                goto out;
        }

        DP_VERBOSE(edev, NETIF_MSG_IFUP,
                   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
                   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

        for_each_queue(i) {
                struct qede_fastpath *fp = &edev->fp_array[i];
                dma_addr_t p_phys_table;
                u32 page_cnt;

                if (fp->type & QEDE_FASTPATH_RX) {
                        struct qed_rxq_start_ret_params ret_params;
                        struct qede_rx_queue *rxq = fp->rxq;
                        __le16 *val;

                        memset(&ret_params, 0, sizeof(ret_params));
                        memset(&q_params, 0, sizeof(q_params));
                        q_params.queue_id = rxq->rxq_id;
                        q_params.vport_id = 0;
                        q_params.p_sb = fp->sb_info;
                        q_params.sb_idx = RX_PI;

                        p_phys_table =
                            qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
                        page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

                        rc = edev->ops->q_rx_start(cdev, i, &q_params,
                                                   rxq->rx_buf_size,
                                                   rxq->rx_bd_ring.p_phys_addr,
                                                   p_phys_table,
                                                   page_cnt, &ret_params);
                        if (rc) {
                                DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
                                       rc);
                                goto out;
                        }

                        /* Use the return parameters */
                        rxq->hw_rxq_prod_addr = ret_params.p_prod;
                        rxq->handle = ret_params.p_handle;

                        val = &fp->sb_info->sb_virt->pi_array[RX_PI];
                        rxq->hw_cons_ptr = val;

                        qede_update_rx_prod(edev, rxq);
                }

                if (fp->type & QEDE_FASTPATH_XDP) {
                        rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
                        if (rc)
                                goto out;

                        fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
                        if (IS_ERR(fp->rxq->xdp_prog)) {
                                rc = PTR_ERR(fp->rxq->xdp_prog);
                                fp->rxq->xdp_prog = NULL;
                                goto out;
                        }
                }

                if (fp->type & QEDE_FASTPATH_TX) {
                        rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
                        if (rc)
                                goto out;
                }
        }

        /* Prepare and send the vport enable */
        vport_update_params->vport_id = start.vport_id;
        vport_update_params->update_vport_active_flg = 1;
        vport_update_params->vport_active_flg = 1;

        if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
            qed_info->tx_switching) {
                vport_update_params->update_tx_switching_flg = 1;
                vport_update_params->tx_switching_flg = 1;
        }

        qede_fill_rss_params(edev, &vport_update_params->rss_params,
                             &vport_update_params->update_rss_flg);

        rc = edev->ops->vport_update(cdev, vport_update_params);
        if (rc)
                DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
        vfree(vport_update_params);
        return rc;
}

enum qede_unload_mode {
        QEDE_UNLOAD_NORMAL,
};

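/* Orderly shutdown of the interface: mark the device closed, notify the
 * RDMA side, stop OS transmission, bring the link down, stop the queues
 * and release the interrupts, NAPI contexts and fastpath memory.
 */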
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
                        bool is_locked)
{
        struct qed_link_params link_params;
        int rc;

        DP_INFO(edev, "Starting qede unload\n");

        if (!is_locked)
                __qede_lock(edev);

        edev->state = QEDE_STATE_CLOSED;

        qede_rdma_dev_event_close(edev);

        /* Close OS Tx */
        netif_tx_disable(edev->ndev);
        netif_carrier_off(edev->ndev);

        /* Reset the link */
        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = false;
        edev->ops->common->set_link(edev->cdev, &link_params);

        rc = qede_stop_queues(edev);
        if (rc) {
                qede_sync_free_irqs(edev);
                goto out;
        }

        DP_INFO(edev, "Stopped Queues\n");

        qede_vlan_mark_nonconfigured(edev);
        edev->ops->fastpath_stop(edev->cdev);

        if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
                qede_poll_for_freeing_arfs_filters(edev);
                qede_free_arfs(edev);
        }

        /* Release the interrupts */
        qede_sync_free_irqs(edev);
        edev->ops->common->set_fp_int(edev->cdev, 0);

        qede_napi_disable_remove(edev);

        qede_free_mem_load(edev);
        qede_free_fp_array(edev);

out:
        if (!is_locked)
                __qede_unlock(edev);

        DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
        QEDE_LOAD_NORMAL,
        QEDE_LOAD_RELOAD,
};

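/* Bring the interface up: size and allocate the fastpath array and queue
 * memory, register NAPI and IRQs, start the vport/queues and request
 * link-up. The err1..err4 labels unwind exactly the stages completed so
 * far, in reverse order.
 */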
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
                     bool is_locked)
{
        struct qed_link_params link_params;
        int rc;

        DP_INFO(edev, "Starting qede load\n");

        if (!is_locked)
                __qede_lock(edev);

        rc = qede_set_num_queues(edev);
        if (rc)
                goto out;

        rc = qede_alloc_fp_array(edev);
        if (rc)
                goto out;

        qede_init_fp(edev);

        rc = qede_alloc_mem_load(edev);
        if (rc)
                goto err1;
        DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
                QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

        rc = qede_set_real_num_queues(edev);
        if (rc)
                goto err2;

        if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
                rc = qede_alloc_arfs(edev);
                if (rc)
                        DP_NOTICE(edev, "aRFS memory allocation failed\n");
        }

        qede_napi_add_enable(edev);
        DP_INFO(edev, "Napi added and enabled\n");

        rc = qede_setup_irqs(edev);
        if (rc)
                goto err3;
        DP_INFO(edev, "Setup IRQs succeeded\n");

        rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
        if (rc)
                goto err4;
        DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

        /* Program un-configured VLANs */
        qede_configure_vlan_filters(edev);

        /* Ask for link-up using current configuration */
        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = true;
        edev->ops->common->set_link(edev->cdev, &link_params);

        edev->state = QEDE_STATE_OPEN;

        DP_INFO(edev, "Ending successfully qede load\n");

        goto out;
err4:
        qede_sync_free_irqs(edev);
        memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
        qede_napi_disable_remove(edev);
err2:
        qede_free_mem_load(edev);
err1:
        edev->ops->common->set_fp_int(edev->cdev, 0);
        qede_free_fp_array(edev);
        edev->num_queues = 0;
        edev->fp_num_tx = 0;
        edev->fp_num_rx = 0;
out:
        if (!is_locked)
                __qede_unlock(edev);

        return rc;
}

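/* Minimal usage sketch of qede_reload() (illustrative only, not part of the
 * driver); a caller changing configuration across an unload/reload cycle
 * supplies a callback through qede_reload_args:
 *
 *	static void my_cb(struct qede_dev *edev,
 *			  struct qede_reload_args *args)
 *	{
 *		// apply the new configuration to edev here
 *	}
 *
 *	struct qede_reload_args args = { .func = my_cb };
 *
 *	qede_reload(edev, &args, false);
 */
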
/* 'func' should be able to run between unload and reload assuming interface
 * is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
                 struct qede_reload_args *args, bool is_locked)
{
        if (!is_locked)
                __qede_lock(edev);

        /* Since qede_lock is held, internal state wouldn't change even
         * if netdev state would start transitioning. Check whether current
         * internal configuration indicates device is up, then reload.
         */
        if (edev->state == QEDE_STATE_OPEN) {
                qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
                if (args)
                        args->func(edev, args);
                qede_load(edev, QEDE_LOAD_RELOAD, true);

                /* Since no one is going to do it for us, re-configure */
                qede_config_rx_mode(edev->ndev);
        } else if (args) {
                args->func(edev, args);
        }

        if (!is_locked)
                __qede_unlock(edev);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);
        int rc;

        netif_carrier_off(ndev);

        edev->ops->common->set_power_state(edev->cdev, PCI_D0);

        rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
        if (rc)
                return rc;

        udp_tunnel_get_rx_info(ndev);

        edev->ops->common->update_drv_state(edev->cdev, true);

        return 0;
}

static int qede_close(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

        edev->ops->common->update_drv_state(edev->cdev, false);

        return 0;
}

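/* Link-change notification from the qed core. Starts or stops the OS Tx
 * queues, toggles the carrier state and propagates the event to the RDMA
 * side via qede_rdma_dev_event_open()/close().
 */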
static void qede_link_update(void *dev, struct qed_link_output *link)
{
        struct qede_dev *edev = dev;

        if (!netif_running(edev->ndev)) {
                DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
                return;
        }

        if (link->link_up) {
                if (!netif_carrier_ok(edev->ndev)) {
                        DP_NOTICE(edev, "Link is up\n");
                        netif_tx_start_all_queues(edev->ndev);
                        netif_carrier_on(edev->ndev);
                        qede_rdma_dev_event_open(edev);
                }
        } else {
                if (netif_carrier_ok(edev->ndev)) {
                        DP_NOTICE(edev, "Link is down\n");
                        netif_tx_disable(edev->ndev);
                        netif_carrier_off(edev->ndev);
                        qede_rdma_dev_event_close(edev);
                }
        }
}

static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
        struct netdev_queue *netdev_txq;

        netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
        if (netif_xmit_stopped(netdev_txq))
                return true;

        return false;
}

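/* Fill the generic management-FW TLV data: offload feature flags
 * (checksum/LSO) and up to QED_TLV_MAC_COUNT MAC addresses, i.e. the
 * primary address plus the first unicast entries.
 */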
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
        struct qede_dev *edev = dev;
        struct netdev_hw_addr *ha;
        int i;

        if (edev->ndev->features & NETIF_F_IP_CSUM)
                data->feat_flags |= QED_TLV_IP_CSUM;
        if (edev->ndev->features & NETIF_F_TSO)
                data->feat_flags |= QED_TLV_LSO;

        ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
        memset(data->mac[1], 0, ETH_ALEN);
        memset(data->mac[2], 0, ETH_ALEN);
        /* Copy the first two UC macs */
        netif_addr_lock_bh(edev->ndev);
        i = 1;
        netdev_for_each_uc_addr(ha, edev->ndev) {
                ether_addr_copy(data->mac[i++], ha->addr);
                if (i == QED_TLV_MAC_COUNT)
                        break;
        }

        netif_addr_unlock_bh(edev->ndev);
}

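/* Fill the Ethernet-specific management-FW TLVs: LSO limits, promiscuous
 * state, queue counts and, under the qede lock, an approximation of how
 * many Tx/Rx queues are currently empty or full.
 */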
static void qede_get_eth_tlv_data(void *dev, void *data)
{
        struct qed_mfw_tlv_eth *etlv = data;
        struct qede_dev *edev = dev;
        struct qede_fastpath *fp;
        int i;

        etlv->lso_maxoff_size = 0xFFFF;
        etlv->lso_maxoff_size_set = true;
        etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
        etlv->lso_minseg_size_set = true;
        etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
        etlv->prom_mode_set = true;
        etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
        etlv->tx_descr_size_set = true;
        etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
        etlv->rx_descr_size_set = true;
        etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
        etlv->iov_offload_set = true;

        /* Fill information regarding queues; Should be done under the qede
         * lock to guarantee those don't change beneath our feet.
         */
        etlv->txqs_empty = true;
        etlv->rxqs_empty = true;
        etlv->num_txqs_full = 0;
        etlv->num_rxqs_full = 0;
        __qede_lock(edev);
        for_each_queue(i) {
                fp = &edev->fp_array[i];
                if (fp->type & QEDE_FASTPATH_TX) {
                        if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
                                etlv->txqs_empty = false;
                        if (qede_is_txq_full(edev, fp->txq))
                                etlv->num_txqs_full++;
                }
                if (fp->type & QEDE_FASTPATH_RX) {
                        if (qede_has_rx_work(fp->rxq))
                                etlv->rxqs_empty = false;

                        /* This one is a bit tricky; Firmware might stop
                         * placing packets if ring is not yet full.
                         * Give an approximation.
                         */
                        if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
                            qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
                            RX_RING_SIZE - 100)
                                etlv->num_rxqs_full++;
                }
        }
        __qede_unlock(edev);

        etlv->txqs_empty_set = true;
        etlv->rxqs_empty_set = true;
        etlv->num_txqs_full_set = true;
        etlv->num_rxqs_full_set = true;
}