qede_main.c

  1. /* QLogic qede NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and /or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/module.h>
  33. #include <linux/pci.h>
  34. #include <linux/version.h>
  35. #include <linux/device.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/etherdevice.h>
  38. #include <linux/skbuff.h>
  39. #include <linux/errno.h>
  40. #include <linux/list.h>
  41. #include <linux/string.h>
  42. #include <linux/dma-mapping.h>
  43. #include <linux/interrupt.h>
  44. #include <asm/byteorder.h>
  45. #include <asm/param.h>
  46. #include <linux/io.h>
  47. #include <linux/netdev_features.h>
  48. #include <linux/udp.h>
  49. #include <linux/tcp.h>
  50. #include <net/udp_tunnel.h>
  51. #include <linux/ip.h>
  52. #include <net/ipv6.h>
  53. #include <net/tcp.h>
  54. #include <linux/if_ether.h>
  55. #include <linux/if_vlan.h>
  56. #include <linux/pkt_sched.h>
  57. #include <linux/ethtool.h>
  58. #include <linux/in.h>
  59. #include <linux/random.h>
  60. #include <net/ip6_checksum.h>
  61. #include <linux/bitops.h>
  62. #include <linux/vmalloc.h>
  63. #include <linux/qed/qede_roce.h>
  64. #include "qede.h"
  65. #include "qede_ptp.h"
  66. static char version[] =
  67. "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
  68. MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
  69. MODULE_LICENSE("GPL");
  70. MODULE_VERSION(DRV_MODULE_VERSION);
  71. static uint debug;
  72. module_param(debug, uint, 0);
  73. MODULE_PARM_DESC(debug, " Default debug msglevel");
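  /* The 'debug' value is decoded into a dp_level/dp_module pair by
   * qede_config_debug() further down; by default only NOTICE-level
   * messages are printed.
   */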
  74. static const struct qed_eth_ops *qed_ops;
  75. #define CHIP_NUM_57980S_40 0x1634
  76. #define CHIP_NUM_57980S_10 0x1666
  77. #define CHIP_NUM_57980S_MF 0x1636
  78. #define CHIP_NUM_57980S_100 0x1644
  79. #define CHIP_NUM_57980S_50 0x1654
  80. #define CHIP_NUM_57980S_25 0x1656
  81. #define CHIP_NUM_57980S_IOV 0x1664
  82. #define CHIP_NUM_AH 0x8070
  83. #define CHIP_NUM_AH_IOV 0x8090
  84. #ifndef PCI_DEVICE_ID_NX2_57980E
  85. #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
  86. #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
  87. #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
  88. #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
  89. #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
  90. #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
  91. #define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
  92. #define PCI_DEVICE_ID_AH CHIP_NUM_AH
  93. #define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
  94. #endif
  95. enum qede_pci_private {
  96. QEDE_PRIVATE_PF,
  97. QEDE_PRIVATE_VF
  98. };
  99. static const struct pci_device_id qede_pci_tbl[] = {
  100. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
  101. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
  102. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
  103. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
  104. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
  105. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
  106. #ifdef CONFIG_QED_SRIOV
  107. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
  108. #endif
  109. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
  110. #ifdef CONFIG_QED_SRIOV
  111. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
  112. #endif
  113. { 0 }
  114. };
  115. MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
  116. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  117. #define TX_TIMEOUT (5 * HZ)
  118. /* Utilize last protocol index for XDP */
  119. #define XDP_PI 11
  120. static void qede_remove(struct pci_dev *pdev);
  121. static void qede_shutdown(struct pci_dev *pdev);
  122. static void qede_link_update(void *dev, struct qed_link_output *link);
  123. /* The qede lock is used to protect driver state change and driver flows that
  124. * are not reentrant.
  125. */
  126. void __qede_lock(struct qede_dev *edev)
  127. {
  128. mutex_lock(&edev->qede_lock);
  129. }
  130. void __qede_unlock(struct qede_dev *edev)
  131. {
  132. mutex_unlock(&edev->qede_lock);
  133. }
  134. #ifdef CONFIG_QED_SRIOV
  135. static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
  136. __be16 vlan_proto)
  137. {
  138. struct qede_dev *edev = netdev_priv(ndev);
  139. if (vlan > 4095) {
  140. DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
  141. return -EINVAL;
  142. }
  143. if (vlan_proto != htons(ETH_P_8021Q))
  144. return -EPROTONOSUPPORT;
  145. DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
  146. vlan, vf);
  147. return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
  148. }
  149. static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
  150. {
  151. struct qede_dev *edev = netdev_priv(ndev);
  152. DP_VERBOSE(edev, QED_MSG_IOV,
  153. "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
  154. mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
  155. if (!is_valid_ether_addr(mac)) {
  156. DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
  157. return -EINVAL;
  158. }
  159. return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
  160. }
  161. static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
  162. {
  163. struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
  164. struct qed_dev_info *qed_info = &edev->dev_info.common;
  165. struct qed_update_vport_params *vport_params;
  166. int rc;
  167. vport_params = vzalloc(sizeof(*vport_params));
  168. if (!vport_params)
  169. return -ENOMEM;
  170. DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
  171. rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
  172. /* Enable/Disable Tx switching for PF */
  173. if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
  174. qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
  175. vport_params->vport_id = 0;
  176. vport_params->update_tx_switching_flg = 1;
  177. vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
  178. edev->ops->vport_update(edev->cdev, vport_params);
  179. }
  180. vfree(vport_params);
  181. return rc;
  182. }
  183. #endif
  184. static struct pci_driver qede_pci_driver = {
  185. .name = "qede",
  186. .id_table = qede_pci_tbl,
  187. .probe = qede_probe,
  188. .remove = qede_remove,
  189. .shutdown = qede_shutdown,
  190. #ifdef CONFIG_QED_SRIOV
  191. .sriov_configure = qede_sriov_configure,
  192. #endif
  193. };
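  /* Callbacks the qed core invokes back into qede: link-state updates,
   * forced-MAC notifications, UDP tunnel port updates and (when RFS is
   * enabled) aRFS filter completion.
   */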
  194. static struct qed_eth_cb_ops qede_ll_ops = {
  195. {
  196. #ifdef CONFIG_RFS_ACCEL
  197. .arfs_filter_op = qede_arfs_filter_op,
  198. #endif
  199. .link_update = qede_link_update,
  200. },
  201. .force_mac = qede_force_mac,
  202. .ports_update = qede_udp_ports_update,
  203. };
  204. static int qede_netdev_event(struct notifier_block *this, unsigned long event,
  205. void *ptr)
  206. {
  207. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  208. struct ethtool_drvinfo drvinfo;
  209. struct qede_dev *edev;
  210. if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
  211. goto done;
  212. /* Check whether this is a qede device */
  213. if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
  214. goto done;
  215. memset(&drvinfo, 0, sizeof(drvinfo));
  216. ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
  217. if (strcmp(drvinfo.driver, "qede"))
  218. goto done;
  219. edev = netdev_priv(ndev);
  220. switch (event) {
  221. case NETDEV_CHANGENAME:
  222. /* Notify qed of the name change */
  223. if (!edev->ops || !edev->ops->common)
  224. goto done;
  225. edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
  226. break;
  227. case NETDEV_CHANGEADDR:
  228. edev = netdev_priv(ndev);
  229. qede_roce_event_changeaddr(edev);
  230. break;
  231. }
  232. done:
  233. return NOTIFY_DONE;
  234. }
  235. static struct notifier_block qede_netdev_notifier = {
  236. .notifier_call = qede_netdev_event,
  237. };
  238. static
  239. int __init qede_init(void)
  240. {
  241. int ret;
  242. pr_info("qede_init: %s\n", version);
  243. qed_ops = qed_get_eth_ops();
  244. if (!qed_ops) {
  245. pr_notice("Failed to get qed ethtool operations\n");
  246. return -EINVAL;
  247. }
  248. /* Must register notifier before pci ops, since we might miss
  249. * interface rename after pci probe and netdev registration.
  250. */
  251. ret = register_netdevice_notifier(&qede_netdev_notifier);
  252. if (ret) {
  253. pr_notice("Failed to register netdevice_notifier\n");
  254. qed_put_eth_ops();
  255. return -EINVAL;
  256. }
  257. ret = pci_register_driver(&qede_pci_driver);
  258. if (ret) {
  259. pr_notice("Failed to register driver\n");
  260. unregister_netdevice_notifier(&qede_netdev_notifier);
  261. qed_put_eth_ops();
  262. return -EINVAL;
  263. }
  264. return 0;
  265. }
  266. static void __exit qede_cleanup(void)
  267. {
  268. if (debug & QED_LOG_INFO_MASK)
  269. pr_info("qede_cleanup called\n");
  270. unregister_netdevice_notifier(&qede_netdev_notifier);
  271. pci_unregister_driver(&qede_pci_driver);
  272. qed_put_eth_ops();
  273. }
  274. module_init(qede_init);
  275. module_exit(qede_cleanup);
  276. static int qede_open(struct net_device *ndev);
  277. static int qede_close(struct net_device *ndev);
  278. void qede_fill_by_demand_stats(struct qede_dev *edev)
  279. {
  280. struct qede_stats_common *p_common = &edev->stats.common;
  281. struct qed_eth_stats stats;
  282. edev->ops->get_vport_stats(edev->cdev, &stats);
  283. p_common->no_buff_discards = stats.common.no_buff_discards;
  284. p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
  285. p_common->ttl0_discard = stats.common.ttl0_discard;
  286. p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
  287. p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
  288. p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
  289. p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
  290. p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
  291. p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
  292. p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
  293. p_common->mac_filter_discards = stats.common.mac_filter_discards;
  294. p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
  295. p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
  296. p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
  297. p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
  298. p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
  299. p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
  300. p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
  301. p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
  302. p_common->coalesced_events = stats.common.tpa_coalesced_events;
  303. p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
  304. p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
  305. p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
  306. p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
  307. p_common->rx_65_to_127_byte_packets =
  308. stats.common.rx_65_to_127_byte_packets;
  309. p_common->rx_128_to_255_byte_packets =
  310. stats.common.rx_128_to_255_byte_packets;
  311. p_common->rx_256_to_511_byte_packets =
  312. stats.common.rx_256_to_511_byte_packets;
  313. p_common->rx_512_to_1023_byte_packets =
  314. stats.common.rx_512_to_1023_byte_packets;
  315. p_common->rx_1024_to_1518_byte_packets =
  316. stats.common.rx_1024_to_1518_byte_packets;
  317. p_common->rx_crc_errors = stats.common.rx_crc_errors;
  318. p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
  319. p_common->rx_pause_frames = stats.common.rx_pause_frames;
  320. p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
  321. p_common->rx_align_errors = stats.common.rx_align_errors;
  322. p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
  323. p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
  324. p_common->rx_jabbers = stats.common.rx_jabbers;
  325. p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
  326. p_common->rx_fragments = stats.common.rx_fragments;
  327. p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
  328. p_common->tx_65_to_127_byte_packets =
  329. stats.common.tx_65_to_127_byte_packets;
  330. p_common->tx_128_to_255_byte_packets =
  331. stats.common.tx_128_to_255_byte_packets;
  332. p_common->tx_256_to_511_byte_packets =
  333. stats.common.tx_256_to_511_byte_packets;
  334. p_common->tx_512_to_1023_byte_packets =
  335. stats.common.tx_512_to_1023_byte_packets;
  336. p_common->tx_1024_to_1518_byte_packets =
  337. stats.common.tx_1024_to_1518_byte_packets;
  338. p_common->tx_pause_frames = stats.common.tx_pause_frames;
  339. p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
  340. p_common->brb_truncates = stats.common.brb_truncates;
  341. p_common->brb_discards = stats.common.brb_discards;
  342. p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
  343. if (QEDE_IS_BB(edev)) {
  344. struct qede_stats_bb *p_bb = &edev->stats.bb;
  345. p_bb->rx_1519_to_1522_byte_packets =
  346. stats.bb.rx_1519_to_1522_byte_packets;
  347. p_bb->rx_1519_to_2047_byte_packets =
  348. stats.bb.rx_1519_to_2047_byte_packets;
  349. p_bb->rx_2048_to_4095_byte_packets =
  350. stats.bb.rx_2048_to_4095_byte_packets;
  351. p_bb->rx_4096_to_9216_byte_packets =
  352. stats.bb.rx_4096_to_9216_byte_packets;
  353. p_bb->rx_9217_to_16383_byte_packets =
  354. stats.bb.rx_9217_to_16383_byte_packets;
  355. p_bb->tx_1519_to_2047_byte_packets =
  356. stats.bb.tx_1519_to_2047_byte_packets;
  357. p_bb->tx_2048_to_4095_byte_packets =
  358. stats.bb.tx_2048_to_4095_byte_packets;
  359. p_bb->tx_4096_to_9216_byte_packets =
  360. stats.bb.tx_4096_to_9216_byte_packets;
  361. p_bb->tx_9217_to_16383_byte_packets =
  362. stats.bb.tx_9217_to_16383_byte_packets;
  363. p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
  364. p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
  365. } else {
  366. struct qede_stats_ah *p_ah = &edev->stats.ah;
  367. p_ah->rx_1519_to_max_byte_packets =
  368. stats.ah.rx_1519_to_max_byte_packets;
  369. p_ah->tx_1519_to_max_byte_packets =
  370. stats.ah.tx_1519_to_max_byte_packets;
  371. }
  372. }
  373. static void qede_get_stats64(struct net_device *dev,
  374. struct rtnl_link_stats64 *stats)
  375. {
  376. struct qede_dev *edev = netdev_priv(dev);
  377. struct qede_stats_common *p_common;
  378. qede_fill_by_demand_stats(edev);
  379. p_common = &edev->stats.common;
  380. stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
  381. p_common->rx_bcast_pkts;
  382. stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
  383. p_common->tx_bcast_pkts;
  384. stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
  385. p_common->rx_bcast_bytes;
  386. stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
  387. p_common->tx_bcast_bytes;
  388. stats->tx_errors = p_common->tx_err_drop_pkts;
  389. stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
  390. stats->rx_fifo_errors = p_common->no_buff_discards;
  391. if (QEDE_IS_BB(edev))
  392. stats->collisions = edev->stats.bb.tx_total_collisions;
  393. stats->rx_crc_errors = p_common->rx_crc_errors;
  394. stats->rx_frame_errors = p_common->rx_align_errors;
  395. }
  396. #ifdef CONFIG_QED_SRIOV
  397. static int qede_get_vf_config(struct net_device *dev, int vfidx,
  398. struct ifla_vf_info *ivi)
  399. {
  400. struct qede_dev *edev = netdev_priv(dev);
  401. if (!edev->ops)
  402. return -EINVAL;
  403. return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
  404. }
  405. static int qede_set_vf_rate(struct net_device *dev, int vfidx,
  406. int min_tx_rate, int max_tx_rate)
  407. {
  408. struct qede_dev *edev = netdev_priv(dev);
  409. return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
  410. max_tx_rate);
  411. }
  412. static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
  413. {
  414. struct qede_dev *edev = netdev_priv(dev);
  415. if (!edev->ops)
  416. return -EINVAL;
  417. return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
  418. }
  419. static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
  420. int link_state)
  421. {
  422. struct qede_dev *edev = netdev_priv(dev);
  423. if (!edev->ops)
  424. return -EINVAL;
  425. return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
  426. }
  427. static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
  428. {
  429. struct qede_dev *edev = netdev_priv(dev);
  430. if (!edev->ops)
  431. return -EINVAL;
  432. return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
  433. }
  434. #endif
  435. static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  436. {
  437. struct qede_dev *edev = netdev_priv(dev);
  438. if (!netif_running(dev))
  439. return -EAGAIN;
  440. switch (cmd) {
  441. case SIOCSHWTSTAMP:
  442. return qede_ptp_hw_ts(edev, ifr);
  443. default:
  444. DP_VERBOSE(edev, QED_MSG_DEBUG,
  445. "default IOCTL cmd 0x%x\n", cmd);
  446. return -EOPNOTSUPP;
  447. }
  448. return 0;
  449. }
  450. static const struct net_device_ops qede_netdev_ops = {
  451. .ndo_open = qede_open,
  452. .ndo_stop = qede_close,
  453. .ndo_start_xmit = qede_start_xmit,
  454. .ndo_set_rx_mode = qede_set_rx_mode,
  455. .ndo_set_mac_address = qede_set_mac_addr,
  456. .ndo_validate_addr = eth_validate_addr,
  457. .ndo_change_mtu = qede_change_mtu,
  458. .ndo_do_ioctl = qede_ioctl,
  459. #ifdef CONFIG_QED_SRIOV
  460. .ndo_set_vf_mac = qede_set_vf_mac,
  461. .ndo_set_vf_vlan = qede_set_vf_vlan,
  462. .ndo_set_vf_trust = qede_set_vf_trust,
  463. #endif
  464. .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
  465. .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
  466. .ndo_set_features = qede_set_features,
  467. .ndo_get_stats64 = qede_get_stats64,
  468. #ifdef CONFIG_QED_SRIOV
  469. .ndo_set_vf_link_state = qede_set_vf_link_state,
  470. .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
  471. .ndo_get_vf_config = qede_get_vf_config,
  472. .ndo_set_vf_rate = qede_set_vf_rate,
  473. #endif
  474. .ndo_udp_tunnel_add = qede_udp_tunnel_add,
  475. .ndo_udp_tunnel_del = qede_udp_tunnel_del,
  476. .ndo_features_check = qede_features_check,
  477. .ndo_xdp = qede_xdp,
  478. #ifdef CONFIG_RFS_ACCEL
  479. .ndo_rx_flow_steer = qede_rx_flow_steer,
  480. #endif
  481. };
  482. static const struct net_device_ops qede_netdev_vf_ops = {
  483. .ndo_open = qede_open,
  484. .ndo_stop = qede_close,
  485. .ndo_start_xmit = qede_start_xmit,
  486. .ndo_set_rx_mode = qede_set_rx_mode,
  487. .ndo_set_mac_address = qede_set_mac_addr,
  488. .ndo_validate_addr = eth_validate_addr,
  489. .ndo_change_mtu = qede_change_mtu,
  490. .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
  491. .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
  492. .ndo_set_features = qede_set_features,
  493. .ndo_get_stats64 = qede_get_stats64,
  494. .ndo_udp_tunnel_add = qede_udp_tunnel_add,
  495. .ndo_udp_tunnel_del = qede_udp_tunnel_del,
  496. .ndo_features_check = qede_features_check,
  497. };
  498. /* -------------------------------------------------------------------------
  499. * START OF PROBE / REMOVE
  500. * -------------------------------------------------------------------------
  501. */
  502. static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
  503. struct pci_dev *pdev,
  504. struct qed_dev_eth_info *info,
  505. u32 dp_module, u8 dp_level)
  506. {
  507. struct net_device *ndev;
  508. struct qede_dev *edev;
  509. ndev = alloc_etherdev_mqs(sizeof(*edev),
  510. info->num_queues, info->num_queues);
  511. if (!ndev) {
  512. pr_err("etherdev allocation failed\n");
  513. return NULL;
  514. }
  515. edev = netdev_priv(ndev);
  516. edev->ndev = ndev;
  517. edev->cdev = cdev;
  518. edev->pdev = pdev;
  519. edev->dp_module = dp_module;
  520. edev->dp_level = dp_level;
  521. edev->ops = qed_ops;
  522. edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
  523. edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
  524. DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
  525. info->num_queues, info->num_queues);
  526. SET_NETDEV_DEV(ndev, &pdev->dev);
  527. memset(&edev->stats, 0, sizeof(edev->stats));
  528. memcpy(&edev->dev_info, info, sizeof(*info));
  529. INIT_LIST_HEAD(&edev->vlan_list);
  530. return edev;
  531. }
  532. static void qede_init_ndev(struct qede_dev *edev)
  533. {
  534. struct net_device *ndev = edev->ndev;
  535. struct pci_dev *pdev = edev->pdev;
  536. bool udp_tunnel_enable = false;
  537. netdev_features_t hw_features;
  538. pci_set_drvdata(pdev, ndev);
  539. ndev->mem_start = edev->dev_info.common.pci_mem_start;
  540. ndev->base_addr = ndev->mem_start;
  541. ndev->mem_end = edev->dev_info.common.pci_mem_end;
  542. ndev->irq = edev->dev_info.common.pci_irq;
  543. ndev->watchdog_timeo = TX_TIMEOUT;
  544. if (IS_VF(edev))
  545. ndev->netdev_ops = &qede_netdev_vf_ops;
  546. else
  547. ndev->netdev_ops = &qede_netdev_ops;
  548. qede_set_ethtool_ops(ndev);
  549. ndev->priv_flags |= IFF_UNICAST_FLT;
  550. /* user-changeable features */
  551. hw_features = NETIF_F_GRO | NETIF_F_SG |
  552. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  553. NETIF_F_TSO | NETIF_F_TSO6;
  554. if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
  555. hw_features |= NETIF_F_NTUPLE;
  556. if (edev->dev_info.common.vxlan_enable ||
  557. edev->dev_info.common.geneve_enable)
  558. udp_tunnel_enable = true;
  559. if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
  560. hw_features |= NETIF_F_TSO_ECN;
  561. ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  562. NETIF_F_SG | NETIF_F_TSO |
  563. NETIF_F_TSO_ECN | NETIF_F_TSO6 |
  564. NETIF_F_RXCSUM;
  565. }
  566. if (udp_tunnel_enable) {
  567. hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
  568. NETIF_F_GSO_UDP_TUNNEL_CSUM);
  569. ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
  570. NETIF_F_GSO_UDP_TUNNEL_CSUM);
  571. }
  572. if (edev->dev_info.common.gre_enable) {
  573. hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
  574. ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
  575. NETIF_F_GSO_GRE_CSUM);
  576. }
  577. ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  578. NETIF_F_HIGHDMA;
  579. ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  580. NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
  581. NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
  582. ndev->hw_features = hw_features;
  583. /* MTU range: 46 - 9600 */
  584. ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
  585. ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
  586. /* Set network device HW mac */
  587. ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
  588. ndev->mtu = edev->dev_info.common.mtu;
  589. }
  590. /* This function converts from 32b param to two params of level and module
  591. * Input 32b decoding:
  592. * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
  593. * 'happy' flow, e.g. memory allocation failed.
  594. * b30 - enable all INFO prints. INFO prints are for major steps in the flow
  595. * and provide important parameters.
  596. * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
  597. * module. VERBOSE prints are for tracking the specific flow in low level.
  598. *
  599. * Notice that the level should be that of the lowest required logs.
  600. */
  601. void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
  602. {
  603. *p_dp_level = QED_LEVEL_NOTICE;
  604. *p_dp_module = 0;
  605. if (debug & QED_LOG_VERBOSE_MASK) {
  606. *p_dp_level = QED_LEVEL_VERBOSE;
  607. *p_dp_module = (debug & 0x3FFFFFFF);
  608. } else if (debug & QED_LOG_INFO_MASK) {
  609. *p_dp_level = QED_LEVEL_INFO;
  610. } else if (debug & QED_LOG_NOTICE_MASK) {
  611. *p_dp_level = QED_LEVEL_NOTICE;
  612. }
  613. }
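  /* Example (a sketch, assuming the QED_LOG_*_MASK values in qed_if.h match
   * the bit layout described above): debug=0x40000000 selects QED_LEVEL_INFO
   * with no module bits, while debug=0x3 selects QED_LEVEL_VERBOSE for the
   * two lowest-numbered modules only.
   */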
  614. static void qede_free_fp_array(struct qede_dev *edev)
  615. {
  616. if (edev->fp_array) {
  617. struct qede_fastpath *fp;
  618. int i;
  619. for_each_queue(i) {
  620. fp = &edev->fp_array[i];
  621. kfree(fp->sb_info);
  622. kfree(fp->rxq);
  623. kfree(fp->xdp_tx);
  624. kfree(fp->txq);
  625. }
  626. kfree(edev->fp_array);
  627. }
  628. edev->num_queues = 0;
  629. edev->fp_num_tx = 0;
  630. edev->fp_num_rx = 0;
  631. }
  632. static int qede_alloc_fp_array(struct qede_dev *edev)
  633. {
  634. u8 fp_combined, fp_rx = edev->fp_num_rx;
  635. struct qede_fastpath *fp;
  636. int i;
  637. edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
  638. sizeof(*edev->fp_array), GFP_KERNEL);
  639. if (!edev->fp_array) {
  640. DP_NOTICE(edev, "fp array allocation failed\n");
  641. goto err;
  642. }
  643. fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
  644. /* Allocate the FP elements for Rx queues followed by combined and then
  645. * the Tx. This ordering should be maintained so that the respective
  646. * queues (Rx or Tx) will be together in the fastpath array and the
  647. * associated ids will be sequential.
  648. */
  649. for_each_queue(i) {
  650. fp = &edev->fp_array[i];
  651. fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
  652. if (!fp->sb_info) {
  653. DP_NOTICE(edev, "sb info struct allocation failed\n");
  654. goto err;
  655. }
  656. if (fp_rx) {
  657. fp->type = QEDE_FASTPATH_RX;
  658. fp_rx--;
  659. } else if (fp_combined) {
  660. fp->type = QEDE_FASTPATH_COMBINED;
  661. fp_combined--;
  662. } else {
  663. fp->type = QEDE_FASTPATH_TX;
  664. }
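  /* The bitwise checks below also cover combined queues, assuming
   * QEDE_FASTPATH_COMBINED is defined in qede.h as the OR of the RX and TX
   * flags.
   */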
  665. if (fp->type & QEDE_FASTPATH_TX) {
  666. fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
  667. if (!fp->txq)
  668. goto err;
  669. }
  670. if (fp->type & QEDE_FASTPATH_RX) {
  671. fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
  672. if (!fp->rxq)
  673. goto err;
  674. if (edev->xdp_prog) {
  675. fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
  676. GFP_KERNEL);
  677. if (!fp->xdp_tx)
  678. goto err;
  679. fp->type |= QEDE_FASTPATH_XDP;
  680. }
  681. }
  682. }
  683. return 0;
  684. err:
  685. qede_free_fp_array(edev);
  686. return -ENOMEM;
  687. }
  688. static void qede_sp_task(struct work_struct *work)
  689. {
  690. struct qede_dev *edev = container_of(work, struct qede_dev,
  691. sp_task.work);
  692. __qede_lock(edev);
  693. if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
  694. if (edev->state == QEDE_STATE_OPEN)
  695. qede_config_rx_mode(edev->ndev);
  696. #ifdef CONFIG_RFS_ACCEL
  697. if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
  698. if (edev->state == QEDE_STATE_OPEN)
  699. qede_process_arfs_filters(edev, false);
  700. }
  701. #endif
  702. __qede_unlock(edev);
  703. }
  704. static void qede_update_pf_params(struct qed_dev *cdev)
  705. {
  706. struct qed_pf_params pf_params;
  707. /* 64 rx + 64 tx + 64 XDP */
  708. memset(&pf_params, 0, sizeof(struct qed_pf_params));
  709. pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
  710. #ifdef CONFIG_RFS_ACCEL
  711. pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
  712. #endif
  713. qed_ops->common->update_pf_params(cdev, &pf_params);
  714. }
  715. enum qede_probe_mode {
  716. QEDE_PROBE_NORMAL,
  717. };
  718. static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
  719. bool is_vf, enum qede_probe_mode mode)
  720. {
  721. struct qed_probe_params probe_params;
  722. struct qed_slowpath_params sp_params;
  723. struct qed_dev_eth_info dev_info;
  724. struct qede_dev *edev;
  725. struct qed_dev *cdev;
  726. int rc;
  727. if (unlikely(dp_level & QED_LEVEL_INFO))
  728. pr_notice("Starting qede probe\n");
  729. memset(&probe_params, 0, sizeof(probe_params));
  730. probe_params.protocol = QED_PROTOCOL_ETH;
  731. probe_params.dp_module = dp_module;
  732. probe_params.dp_level = dp_level;
  733. probe_params.is_vf = is_vf;
  734. cdev = qed_ops->common->probe(pdev, &probe_params);
  735. if (!cdev) {
  736. rc = -ENODEV;
  737. goto err0;
  738. }
  739. qede_update_pf_params(cdev);
  740. /* Start the Slowpath-process */
  741. memset(&sp_params, 0, sizeof(sp_params));
  742. sp_params.int_mode = QED_INT_MODE_MSIX;
  743. sp_params.drv_major = QEDE_MAJOR_VERSION;
  744. sp_params.drv_minor = QEDE_MINOR_VERSION;
  745. sp_params.drv_rev = QEDE_REVISION_VERSION;
  746. sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
  747. strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
  748. rc = qed_ops->common->slowpath_start(cdev, &sp_params);
  749. if (rc) {
  750. pr_notice("Cannot start slowpath\n");
  751. goto err1;
  752. }
  753. /* Learn information crucial for qede to progress */
  754. rc = qed_ops->fill_dev_info(cdev, &dev_info);
  755. if (rc)
  756. goto err2;
  757. edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
  758. dp_level);
  759. if (!edev) {
  760. rc = -ENOMEM;
  761. goto err2;
  762. }
  763. if (is_vf)
  764. edev->flags |= QEDE_FLAG_IS_VF;
  765. qede_init_ndev(edev);
  766. rc = qede_roce_dev_add(edev);
  767. if (rc)
  768. goto err3;
  769. /* Prepare the lock prior to the registration of the netdev,
  770. * as once it's registered we might reach flows requiring it
  771. * [it's even possible to reach a flow needing it directly
  772. * from there, although it's unlikely].
  773. */
  774. INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
  775. mutex_init(&edev->qede_lock);
  776. rc = register_netdev(edev->ndev);
  777. if (rc) {
  778. DP_NOTICE(edev, "Cannot register net-device\n");
  779. goto err4;
  780. }
  781. edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
  782. /* PTP not supported on VFs */
  783. if (!is_vf)
  784. qede_ptp_enable(edev, true);
  785. edev->ops->register_ops(cdev, &qede_ll_ops, edev);
  786. #ifdef CONFIG_DCB
  787. if (!IS_VF(edev))
  788. qede_set_dcbnl_ops(edev->ndev);
  789. #endif
  790. edev->rx_copybreak = QEDE_RX_HDR_SIZE;
  791. DP_INFO(edev, "Ending successfully qede probe\n");
  792. return 0;
  793. err4:
  794. qede_roce_dev_remove(edev);
  795. err3:
  796. free_netdev(edev->ndev);
  797. err2:
  798. qed_ops->common->slowpath_stop(cdev);
  799. err1:
  800. qed_ops->common->remove(cdev);
  801. err0:
  802. return rc;
  803. }
  804. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  805. {
  806. bool is_vf = false;
  807. u32 dp_module = 0;
  808. u8 dp_level = 0;
  809. switch ((enum qede_pci_private)id->driver_data) {
  810. case QEDE_PRIVATE_VF:
  811. if (debug & QED_LOG_VERBOSE_MASK)
  812. dev_err(&pdev->dev, "Probing a VF\n");
  813. is_vf = true;
  814. break;
  815. default:
  816. if (debug & QED_LOG_VERBOSE_MASK)
  817. dev_err(&pdev->dev, "Probing a PF\n");
  818. }
  819. qede_config_debug(debug, &dp_module, &dp_level);
  820. return __qede_probe(pdev, dp_module, dp_level, is_vf,
  821. QEDE_PROBE_NORMAL);
  822. }
  823. enum qede_remove_mode {
  824. QEDE_REMOVE_NORMAL,
  825. };
  826. static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
  827. {
  828. struct net_device *ndev = pci_get_drvdata(pdev);
  829. struct qede_dev *edev = netdev_priv(ndev);
  830. struct qed_dev *cdev = edev->cdev;
  831. DP_INFO(edev, "Starting qede_remove\n");
  832. unregister_netdev(ndev);
  833. cancel_delayed_work_sync(&edev->sp_task);
  834. qede_ptp_disable(edev);
  835. qede_roce_dev_remove(edev);
  836. edev->ops->common->set_power_state(cdev, PCI_D0);
  837. pci_set_drvdata(pdev, NULL);
  838. /* Release edev's reference to the XDP bpf program, if one exists */
  839. if (edev->xdp_prog)
  840. bpf_prog_put(edev->xdp_prog);
  841. /* Use global ops since we've freed edev */
  842. qed_ops->common->slowpath_stop(cdev);
  843. if (system_state == SYSTEM_POWER_OFF)
  844. return;
  845. qed_ops->common->remove(cdev);
  846. /* Since this can happen out-of-sync with other flows,
  847. * don't release the netdevice until after slowpath stop
  848. * has been called to guarantee various other contexts
  849. * [e.g., QED register callbacks] won't break anything when
  850. * accessing the netdevice.
  851. */
  852. free_netdev(ndev);
  853. dev_info(&pdev->dev, "Ending qede_remove successfully\n");
  854. }
  855. static void qede_remove(struct pci_dev *pdev)
  856. {
  857. __qede_remove(pdev, QEDE_REMOVE_NORMAL);
  858. }
  859. static void qede_shutdown(struct pci_dev *pdev)
  860. {
  861. __qede_remove(pdev, QEDE_REMOVE_NORMAL);
  862. }
  863. /* -------------------------------------------------------------------------
  864. * START OF LOAD / UNLOAD
  865. * -------------------------------------------------------------------------
  866. */
  867. static int qede_set_num_queues(struct qede_dev *edev)
  868. {
  869. int rc;
  870. u16 rss_num;
  871. /* Set up queues according to available resources */
  872. if (edev->req_queues)
  873. rss_num = edev->req_queues;
  874. else
  875. rss_num = netif_get_num_default_rss_queues() *
  876. edev->dev_info.common.num_hwfns;
  877. rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
  878. rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
  879. if (rc > 0) {
  880. /* Managed to request interrupts for our queues */
  881. edev->num_queues = rc;
  882. DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
  883. QEDE_QUEUE_CNT(edev), rss_num);
  884. rc = 0;
  885. }
  886. edev->fp_num_tx = edev->req_num_tx;
  887. edev->fp_num_rx = edev->req_num_rx;
  888. return rc;
  889. }
  890. static void qede_free_mem_sb(struct qede_dev *edev,
  891. struct qed_sb_info *sb_info)
  892. {
  893. if (sb_info->sb_virt)
  894. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
  895. (void *)sb_info->sb_virt, sb_info->sb_phys);
  896. }
  897. /* This function allocates fast-path status block memory */
  898. static int qede_alloc_mem_sb(struct qede_dev *edev,
  899. struct qed_sb_info *sb_info, u16 sb_id)
  900. {
  901. struct status_block *sb_virt;
  902. dma_addr_t sb_phys;
  903. int rc;
  904. sb_virt = dma_alloc_coherent(&edev->pdev->dev,
  905. sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
  906. if (!sb_virt) {
  907. DP_ERR(edev, "Status block allocation failed\n");
  908. return -ENOMEM;
  909. }
  910. rc = edev->ops->common->sb_init(edev->cdev, sb_info,
  911. sb_virt, sb_phys, sb_id,
  912. QED_SB_TYPE_L2_QUEUE);
  913. if (rc) {
  914. DP_ERR(edev, "Status block initialization failed\n");
  915. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
  916. sb_virt, sb_phys);
  917. return rc;
  918. }
  919. return 0;
  920. }
  921. static void qede_free_rx_buffers(struct qede_dev *edev,
  922. struct qede_rx_queue *rxq)
  923. {
  924. u16 i;
  925. for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
  926. struct sw_rx_data *rx_buf;
  927. struct page *data;
  928. rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
  929. data = rx_buf->data;
  930. dma_unmap_page(&edev->pdev->dev,
  931. rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
  932. rx_buf->data = NULL;
  933. __free_page(data);
  934. }
  935. }
  936. static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
  937. {
  938. int i;
  939. if (edev->gro_disable)
  940. return;
  941. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  942. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  943. struct sw_rx_data *replace_buf = &tpa_info->buffer;
  944. if (replace_buf->data) {
  945. dma_unmap_page(&edev->pdev->dev,
  946. replace_buf->mapping,
  947. PAGE_SIZE, DMA_FROM_DEVICE);
  948. __free_page(replace_buf->data);
  949. }
  950. }
  951. }
  952. static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
  953. {
  954. qede_free_sge_mem(edev, rxq);
  955. /* Free rx buffers */
  956. qede_free_rx_buffers(edev, rxq);
  957. /* Free the parallel SW ring */
  958. kfree(rxq->sw_rx_ring);
  959. /* Free the real RQ ring used by FW */
  960. edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
  961. edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
  962. }
  963. static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
  964. {
  965. dma_addr_t mapping;
  966. int i;
  967. /* Don't perform FW aggregations in case of XDP */
  968. if (edev->xdp_prog)
  969. edev->gro_disable = 1;
  970. if (edev->gro_disable)
  971. return 0;
  972. if (edev->ndev->mtu > PAGE_SIZE) {
  973. edev->gro_disable = 1;
  974. return 0;
  975. }
  976. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  977. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  978. struct sw_rx_data *replace_buf = &tpa_info->buffer;
  979. replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
  980. if (unlikely(!replace_buf->data)) {
  981. DP_NOTICE(edev,
  982. "Failed to allocate TPA skb pool [replacement buffer]\n");
  983. goto err;
  984. }
  985. mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
  986. PAGE_SIZE, DMA_FROM_DEVICE);
  987. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  988. DP_NOTICE(edev,
  989. "Failed to map TPA replacement buffer\n");
  990. goto err;
  991. }
  992. replace_buf->mapping = mapping;
  993. tpa_info->buffer.page_offset = 0;
  994. tpa_info->buffer_mapping = mapping;
  995. tpa_info->state = QEDE_AGG_STATE_NONE;
  996. }
  997. return 0;
  998. err:
  999. qede_free_sge_mem(edev, rxq);
  1000. edev->gro_disable = 1;
  1001. return -ENOMEM;
  1002. }
  1003. /* This function allocates all memory needed per Rx queue */
  1004. static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
  1005. {
  1006. int i, rc, size;
  1007. rxq->num_rx_buffers = edev->q_num_rx_buffers;
  1008. rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
  1009. rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
  1010. /* Make sure that the headroom and payload fit in a single page */
  1011. if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
  1012. rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
  1013. /* Segment size to split a page into multiple equal parts,
  1014. * unless XDP is used in which case we'd use the entire page.
  1015. */
  1016. if (!edev->xdp_prog)
  1017. rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
  1018. else
  1019. rxq->rx_buf_seg_size = PAGE_SIZE;
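  /* Worked example (a sketch, assuming 4K pages and a 1500-byte MTU):
   * rx_buf_size is roughly NET_IP_ALIGN + ETH_OVERHEAD + 1500, so
   * roundup_pow_of_two() yields a 2048-byte segment and each page backs two
   * Rx buffers; with XDP the whole page backs a single buffer.
   */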
  1020. /* Allocate the parallel driver ring for Rx buffers */
  1021. size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
  1022. rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
  1023. if (!rxq->sw_rx_ring) {
  1024. DP_ERR(edev, "Rx buffers ring allocation failed\n");
  1025. rc = -ENOMEM;
  1026. goto err;
  1027. }
  1028. /* Allocate FW Rx ring */
  1029. rc = edev->ops->common->chain_alloc(edev->cdev,
  1030. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  1031. QED_CHAIN_MODE_NEXT_PTR,
  1032. QED_CHAIN_CNT_TYPE_U16,
  1033. RX_RING_SIZE,
  1034. sizeof(struct eth_rx_bd),
  1035. &rxq->rx_bd_ring);
  1036. if (rc)
  1037. goto err;
  1038. /* Allocate FW completion ring */
  1039. rc = edev->ops->common->chain_alloc(edev->cdev,
  1040. QED_CHAIN_USE_TO_CONSUME,
  1041. QED_CHAIN_MODE_PBL,
  1042. QED_CHAIN_CNT_TYPE_U16,
  1043. RX_RING_SIZE,
  1044. sizeof(union eth_rx_cqe),
  1045. &rxq->rx_comp_ring);
  1046. if (rc)
  1047. goto err;
  1048. /* Allocate buffers for the Rx ring */
  1049. rxq->filled_buffers = 0;
  1050. for (i = 0; i < rxq->num_rx_buffers; i++) {
  1051. rc = qede_alloc_rx_buffer(rxq, false);
  1052. if (rc) {
  1053. DP_ERR(edev,
  1054. "Rx buffers allocation failed at index %d\n", i);
  1055. goto err;
  1056. }
  1057. }
  1058. rc = qede_alloc_sge_mem(edev, rxq);
  1059. err:
  1060. return rc;
  1061. }
  1062. static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
  1063. {
  1064. /* Free the parallel SW ring */
  1065. if (txq->is_xdp)
  1066. kfree(txq->sw_tx_ring.xdp);
  1067. else
  1068. kfree(txq->sw_tx_ring.skbs);
  1069. /* Free the real RQ ring used by FW */
  1070. edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
  1071. }
  1072. /* This function allocates all memory needed per Tx queue */
  1073. static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
  1074. {
  1075. union eth_tx_bd_types *p_virt;
  1076. int size, rc;
  1077. txq->num_tx_buffers = edev->q_num_tx_buffers;
  1078. /* Allocate the parallel driver ring for Tx buffers */
  1079. if (txq->is_xdp) {
  1080. size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
  1081. txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
  1082. if (!txq->sw_tx_ring.xdp)
  1083. goto err;
  1084. } else {
  1085. size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
  1086. txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
  1087. if (!txq->sw_tx_ring.skbs)
  1088. goto err;
  1089. }
  1090. rc = edev->ops->common->chain_alloc(edev->cdev,
  1091. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  1092. QED_CHAIN_MODE_PBL,
  1093. QED_CHAIN_CNT_TYPE_U16,
  1094. TX_RING_SIZE,
  1095. sizeof(*p_virt), &txq->tx_pbl);
  1096. if (rc)
  1097. goto err;
  1098. return 0;
  1099. err:
  1100. qede_free_mem_txq(edev, txq);
  1101. return -ENOMEM;
  1102. }
  1103. /* This function frees all memory of a single fp */
  1104. static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  1105. {
  1106. qede_free_mem_sb(edev, fp->sb_info);
  1107. if (fp->type & QEDE_FASTPATH_RX)
  1108. qede_free_mem_rxq(edev, fp->rxq);
  1109. if (fp->type & QEDE_FASTPATH_XDP)
  1110. qede_free_mem_txq(edev, fp->xdp_tx);
  1111. if (fp->type & QEDE_FASTPATH_TX)
  1112. qede_free_mem_txq(edev, fp->txq);
  1113. }
  1114. /* This function allocates all memory needed for a single fp (i.e. an entity
  1115. * which contains status block, one rx queue and/or multiple per-TC tx queues.
  1116. */
  1117. static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  1118. {
  1119. int rc = 0;
  1120. rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
  1121. if (rc)
  1122. goto out;
  1123. if (fp->type & QEDE_FASTPATH_RX) {
  1124. rc = qede_alloc_mem_rxq(edev, fp->rxq);
  1125. if (rc)
  1126. goto out;
  1127. }
  1128. if (fp->type & QEDE_FASTPATH_XDP) {
  1129. rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
  1130. if (rc)
  1131. goto out;
  1132. }
  1133. if (fp->type & QEDE_FASTPATH_TX) {
  1134. rc = qede_alloc_mem_txq(edev, fp->txq);
  1135. if (rc)
  1136. goto out;
  1137. }
  1138. out:
  1139. return rc;
  1140. }
  1141. static void qede_free_mem_load(struct qede_dev *edev)
  1142. {
  1143. int i;
  1144. for_each_queue(i) {
  1145. struct qede_fastpath *fp = &edev->fp_array[i];
  1146. qede_free_mem_fp(edev, fp);
  1147. }
  1148. }
  1149. /* This function allocates all qede memory at NIC load. */
  1150. static int qede_alloc_mem_load(struct qede_dev *edev)
  1151. {
  1152. int rc = 0, queue_id;
  1153. for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
  1154. struct qede_fastpath *fp = &edev->fp_array[queue_id];
  1155. rc = qede_alloc_mem_fp(edev, fp);
  1156. if (rc) {
  1157. DP_ERR(edev,
  1158. "Failed to allocate memory for fastpath - rss id = %d\n",
  1159. queue_id);
  1160. qede_free_mem_load(edev);
  1161. return rc;
  1162. }
  1163. }
  1164. return 0;
  1165. }
  1166. /* This function inits fp content and resets the SB, RXQ and TXQ structures */
  1167. static void qede_init_fp(struct qede_dev *edev)
  1168. {
  1169. int queue_id, rxq_index = 0, txq_index = 0;
  1170. struct qede_fastpath *fp;
  1171. for_each_queue(queue_id) {
  1172. fp = &edev->fp_array[queue_id];
  1173. fp->edev = edev;
  1174. fp->id = queue_id;
  1175. if (fp->type & QEDE_FASTPATH_XDP) {
  1176. fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
  1177. rxq_index);
  1178. fp->xdp_tx->is_xdp = 1;
  1179. }
  1180. if (fp->type & QEDE_FASTPATH_RX) {
  1181. fp->rxq->rxq_id = rxq_index++;
  1182. /* Determine how to map buffers for this queue */
  1183. if (fp->type & QEDE_FASTPATH_XDP)
  1184. fp->rxq->data_direction = DMA_BIDIRECTIONAL;
  1185. else
  1186. fp->rxq->data_direction = DMA_FROM_DEVICE;
  1187. fp->rxq->dev = &edev->pdev->dev;
  1188. }
  1189. if (fp->type & QEDE_FASTPATH_TX) {
  1190. fp->txq->index = txq_index++;
  1191. if (edev->dev_info.is_legacy)
  1192. fp->txq->is_legacy = 1;
  1193. fp->txq->dev = &edev->pdev->dev;
  1194. }
  1195. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  1196. edev->ndev->name, queue_id);
  1197. }
  1198. edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
  1199. }
  1200. static int qede_set_real_num_queues(struct qede_dev *edev)
  1201. {
  1202. int rc = 0;
  1203. rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
  1204. if (rc) {
  1205. DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
  1206. return rc;
  1207. }
  1208. rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
  1209. if (rc) {
  1210. DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
  1211. return rc;
  1212. }
  1213. return 0;
  1214. }
  1215. static void qede_napi_disable_remove(struct qede_dev *edev)
  1216. {
  1217. int i;
  1218. for_each_queue(i) {
  1219. napi_disable(&edev->fp_array[i].napi);
  1220. netif_napi_del(&edev->fp_array[i].napi);
  1221. }
  1222. }
  1223. static void qede_napi_add_enable(struct qede_dev *edev)
  1224. {
  1225. int i;
  1226. /* Add NAPI objects */
  1227. for_each_queue(i) {
  1228. netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
  1229. qede_poll, NAPI_POLL_WEIGHT);
  1230. napi_enable(&edev->fp_array[i].napi);
  1231. }
  1232. }
  1233. static void qede_sync_free_irqs(struct qede_dev *edev)
  1234. {
  1235. int i;
  1236. for (i = 0; i < edev->int_info.used_cnt; i++) {
  1237. if (edev->int_info.msix_cnt) {
  1238. synchronize_irq(edev->int_info.msix[i].vector);
  1239. free_irq(edev->int_info.msix[i].vector,
  1240. &edev->fp_array[i]);
  1241. } else {
  1242. edev->ops->common->simd_handler_clean(edev->cdev, i);
  1243. }
  1244. }
  1245. edev->int_info.used_cnt = 0;
  1246. }
  1247. static int qede_req_msix_irqs(struct qede_dev *edev)
  1248. {
  1249. int i, rc;
  1250. /* Sanity check: the number of interrupts must cover the prepared RSS queues */
  1251. if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
  1252. DP_ERR(edev,
  1253. "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
  1254. QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
  1255. return -EINVAL;
  1256. }
  1257. for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
  1258. #ifdef CONFIG_RFS_ACCEL
  1259. struct qede_fastpath *fp = &edev->fp_array[i];
  1260. if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
  1261. rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
  1262. edev->int_info.msix[i].vector);
  1263. if (rc) {
  1264. DP_ERR(edev, "Failed to add CPU rmap\n");
  1265. qede_free_arfs(edev);
  1266. }
  1267. }
  1268. #endif
  1269. rc = request_irq(edev->int_info.msix[i].vector,
  1270. qede_msix_fp_int, 0, edev->fp_array[i].name,
  1271. &edev->fp_array[i]);
  1272. if (rc) {
  1273. DP_ERR(edev, "Request fp %d irq failed\n", i);
  1274. qede_sync_free_irqs(edev);
  1275. return rc;
  1276. }
  1277. DP_VERBOSE(edev, NETIF_MSG_INTR,
  1278. "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
  1279. edev->fp_array[i].name, i,
  1280. &edev->fp_array[i]);
  1281. edev->int_info.used_cnt++;
  1282. }
  1283. return 0;
  1284. }
  1285. static void qede_simd_fp_handler(void *cookie)
  1286. {
  1287. struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
  1288. napi_schedule_irqoff(&fp->napi);
  1289. }
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* Let qed learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}

	return 0;
}

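/* Wait for a Tx queue to drain; optionally ask the MCP to drain it and retry once */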
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

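/* Deactivate the vport, drain and stop all Rx/Tx/XDP queues, then stop the vport */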
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_drain_txq(edev, fp->txq, true);
			if (rc)
				return rc;
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_stop_txq(edev, fp->txq, i);
			if (rc)
				return rc;
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

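/* Start a single Tx (or XDP forwarding) queue and prepare its doorbell data */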
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.sb = fp->sb_info->igu_sb_id;
	params.sb_idx = sb_idx;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	return rc;
}

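/* Start the vport and every Rx/Tx/XDP queue, then activate the vport with RSS */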
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
			if (IS_ERR(fp->rxq->xdp_prog)) {
				rc = PTR_ERR(fp->rxq->xdp_prog);
				fp->rxq->xdp_prog = NULL;
				goto out;
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
			if (rc)
				goto out;
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};

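/* Tear down the data path: stop queues, release IRQs, NAPI and fastpath memory */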
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	qede_roce_dev_event_close(edev);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);

	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);
#ifdef CONFIG_RFS_ACCEL
	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}
#endif
	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
};

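/* Bring up the data path: allocate fastpath resources, enable NAPI, request IRQs and start the queues */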
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

#ifdef CONFIG_RFS_ACCEL
	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		rc = qede_alloc_arfs(edev);
		if (rc)
			DP_NOTICE(edev, "aRFS memory allocation failed\n");
	}
#endif
	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	qede_roce_dev_event_open(edev);

	edev->state = QEDE_STATE_OPEN;

	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}

/* 'func' should be able to run between unload and reload when the interface
 * is actually running, or on its own in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, the internal state can't change even if
	 * the netdev state starts transitioning. Check whether the current
	 * internal configuration indicates the device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}

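/* Link change notification from qed: update carrier and Tx queue state accordingly */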
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}