nfp_net_main.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833
  1. /*
  2. * Copyright (C) 2015-2017 Netronome Systems, Inc.
  3. *
  4. * This software is dual licensed under the GNU General Public License Version 2,
  5. * June 1991 as shown in the file COPYING in the top-level directory of this
  6. * source tree or the BSD 2-Clause License provided below. You have the
  7. * option to license this software under the complete terms of either license.
  8. *
  9. * The BSD 2-Clause License:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * 1. Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * 2. Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. /*
  34. * nfp_net_main.c
  35. * Netronome network device driver: Main entry point
  36. * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
  37. * Alejandro Lucero <alejandro.lucero@netronome.com>
  38. * Jason McMullan <jason.mcmullan@netronome.com>
  39. * Rolf Neugebauer <rolf.neugebauer@netronome.com>
  40. */
  41. #include <linux/etherdevice.h>
  42. #include <linux/kernel.h>
  43. #include <linux/init.h>
  44. #include <linux/lockdep.h>
  45. #include <linux/pci.h>
  46. #include <linux/pci_regs.h>
  47. #include <linux/msi.h>
  48. #include <linux/random.h>
  49. #include <linux/rtnetlink.h>
  50. #include "nfpcore/nfp.h"
  51. #include "nfpcore/nfp_cpp.h"
  52. #include "nfpcore/nfp_nffw.h"
  53. #include "nfpcore/nfp_nsp.h"
  54. #include "nfpcore/nfp6000_pcie.h"
  55. #include "nfp_app.h"
  56. #include "nfp_net_ctrl.h"
  57. #include "nfp_net_sriov.h"
  58. #include "nfp_net.h"
  59. #include "nfp_main.h"
  60. #include "nfp_port.h"
  61. #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
  62. /**
  63. * nfp_net_get_mac_addr() - Get the MAC address.
  64. * @pf: NFP PF handle
  65. * @netdev: net_device to set MAC address on
  66. * @port: NFP port structure
  67. *
  68. * First try to get the MAC address from NSP ETH table. If that
  69. * fails generate a random address.
  70. */
  71. void
  72. nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
  73. struct nfp_port *port)
  74. {
  75. struct nfp_eth_table_port *eth_port;
  76. eth_port = __nfp_port_get_eth_port(port);
  77. if (!eth_port) {
  78. eth_hw_addr_random(netdev);
  79. return;
  80. }
  81. ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
  82. ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
  83. }
  84. static struct nfp_eth_table_port *
  85. nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
  86. {
  87. int i;
  88. for (i = 0; eth_tbl && i < eth_tbl->count; i++)
  89. if (eth_tbl->ports[i].index == index)
  90. return &eth_tbl->ports[i];
  91. return NULL;
  92. }
  93. static int
  94. nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
  95. unsigned int default_val)
  96. {
  97. char name[256];
  98. int err = 0;
  99. u64 val;
  100. snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
  101. val = nfp_rtsym_read_le(pf->rtbl, name, &err);
  102. if (err) {
  103. if (err == -ENOENT)
  104. return default_val;
  105. nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
  106. return err;
  107. }
  108. return val;
  109. }
  110. static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
  111. {
  112. return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
  113. }
  114. static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
  115. {
  116. return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
  117. NFP_APP_CORE_NIC);
  118. }
  119. static u8 __iomem *
  120. nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
  121. unsigned int min_size, struct nfp_cpp_area **area)
  122. {
  123. char pf_symbol[256];
  124. snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
  125. nfp_cppcore_pcie_unit(pf->cpp));
  126. return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
  127. }
/* Undo nfp_net_pf_alloc_vnic(): app teardown, port free, unlink, free. */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	/* Only data vNICs were registered with the app on alloc */
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
  137. static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
  138. {
  139. struct nfp_net *nn, *next;
  140. list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
  141. if (nfp_net_is_data_vnic(nn))
  142. nfp_net_pf_free_vnic(pf, nn);
  143. }
/* Allocate one vNIC backed by @ctrl_bar/@qc_bar.
 *
 * Reads queue layout from the control BAR, allocates the nfp_net
 * (with a netdev only when @needs_netdev) and links it onto
 * pf->vnics.  Returns ERR_PTR() on failure.
 */
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	/* FW advertises queue bases and max ring counts in the ctrl BAR */
	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
	nn->dp.ctrl_bar = ctrl_bar;
	/* Queue controller windows are indexed by absolute queue number */
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	/* Only netdev-backed (data) vNICs are known to the app layer */
	if (needs_netdev) {
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}
/* Finish init of an allocated vNIC: core init, debugfs, devlink
 * port registration and app init.  On failure everything done here
 * is unwound in reverse order.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir, id);

	/* ctrl vNIC has no port; only register devlink ports that exist */
	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			goto err_dfs_clean;
	}

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_clean;
	}

	return 0;

err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
err_dfs_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	return err;
}
/* Allocate one data vNIC per port.  Each vNIC owns a
 * NFP_PF_CSR_SLICE_SIZE slice of the control BAR.  vNICs whose port
 * the app marked invalid are freed right away; if none survive
 * return -ENODEV.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Advance before the invalid check so the next vNIC gets
		 * its own BAR slice either way
		 */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
/* Undo nfp_net_pf_init_vnic() - reverse order of init. */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
}
/* Allocate MSI-X vectors and distribute them across vNICs.
 *
 * Requests each vNIC's full complement (non-queue vectors plus one
 * per ring vector) but accepts as few as NFP_NET_MIN_VNIC_IRQS per
 * vNIC; whatever was granted is then spread evenly.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		/* Fair share of what remains, capped at what this vNIC wants */
		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}
/* Undo nfp_net_pf_alloc_irqs(). */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
  283. static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
  284. {
  285. struct nfp_net *nn;
  286. unsigned int id;
  287. int err;
  288. /* Finish vNIC init and register */
  289. id = 0;
  290. list_for_each_entry(nn, &pf->vnics, vnic_list) {
  291. if (!nfp_net_is_data_vnic(nn))
  292. continue;
  293. err = nfp_net_pf_init_vnic(pf, nn, id);
  294. if (err)
  295. goto err_prev_deinit;
  296. id++;
  297. }
  298. return 0;
  299. err_prev_deinit:
  300. list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
  301. if (nfp_net_is_data_vnic(nn))
  302. nfp_net_pf_clean_vnic(pf, nn);
  303. return err;
  304. }
/* Allocate and init the app, and map/allocate the ctrl vNIC if the
 * app needs one.  pf->lock is held only around nfp_app_init() /
 * nfp_app_clean().  On failure pf->app is left NULL.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	mutex_lock(&pf->lock);
	err = nfp_app_init(pf->app);
	mutex_unlock(&pf->lock);
	if (err)
		goto err_free;

	/* Apps without a control message channel are done here */
	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
					NFP_PF_CSR_SLICE_SIZE,
					&pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	/* ctrl vNIC carries no netdev (needs_netdev == false) */
	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
/* Undo nfp_net_pf_app_init(): free ctrl vNIC (if any), then the app.
 * nfp_app_clean() is called under pf->lock, mirroring app init.
 */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);

	nfp_app_free(pf->app);
	pf->app = NULL;
}
  358. static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
  359. {
  360. int err;
  361. if (!pf->ctrl_vnic)
  362. return 0;
  363. err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
  364. if (err)
  365. return err;
  366. err = nfp_ctrl_open(pf->ctrl_vnic);
  367. if (err)
  368. goto err_clean_ctrl;
  369. return 0;
  370. err_clean_ctrl:
  371. nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
  372. return err;
  373. }
/* Close and clean the ctrl vNIC, if the app has one. */
static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}
/* Bring the app up: ctrl vNIC first, then the app itself, then
 * re-enable SR-IOV if VFs were already configured.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
/* Undo nfp_net_pf_app_start() in reverse order. */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
/* Release all CPP areas mapped by nfp_net_pci_map_mem().
 * The first three are optional and may never have been mapped.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
/* Map all CPP memory areas the PF driver needs.
 *
 * Mandatory: the data vNIC control BAR and the queue controller
 * area.  Optional (-ENOENT tolerated, pointer left NULL): MAC
 * stats, the VF config BAR and the second VF config table.
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u8 __iomem *mem;
	u32 min_size;
	int err;

	/* One control BAR slice per data vNIC */
	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
				   min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	if (pf->eth_tbl) {
		min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			/* Symbol absent - MAC stats simply unavailable */
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
					      "_pf%d_net_vf_bar",
					      NFP_NET_CFG_BAR_SZ *
					      pf->limit_vfs, &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_net_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					      "_pf%d_net_vf_cfg2",
					      min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	/* Queue controller windows */
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
			       NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
			       &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}
/* Refresh @port's cached ETH table entry from a freshly read table.
 *
 * Must be called under RTNL.  A vanished port is flagged CHANGED and
 * -EIO returned; an override-config change invalidates the port.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	/* Cache the fresh entry even for an invalidated port */
	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}
/* Re-read the NSP ETH table and update all ports' state.
 *
 * Caller must hold pf->lock.  Ports whose config changed are marked
 * invalid, representors are resynced, and vNICs of invalid ports
 * are torn down at the end.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed - re-mark ETH ports as changed so a later
		 * refresh retries them
		 */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}
  550. static void nfp_net_refresh_vnics(struct work_struct *work)
  551. {
  552. struct nfp_pf *pf = container_of(work, struct nfp_pf,
  553. port_refresh_work);
  554. mutex_lock(&pf->lock);
  555. nfp_net_refresh_port_table_sync(pf);
  556. mutex_unlock(&pf->lock);
  557. }
  558. void nfp_net_refresh_port_table(struct nfp_port *port)
  559. {
  560. struct nfp_pf *pf = port->app->pf;
  561. set_bit(NFP_PORT_CHANGED, &port->flags);
  562. queue_work(pf->wq, &pf->port_refresh_work);
  563. }
  564. int nfp_net_refresh_eth_port(struct nfp_port *port)
  565. {
  566. struct nfp_cpp *cpp = port->app->cpp;
  567. struct nfp_eth_table *eth_table;
  568. int ret;
  569. clear_bit(NFP_PORT_CHANGED, &port->flags);
  570. eth_table = nfp_eth_read_ports(cpp);
  571. if (!eth_table) {
  572. set_bit(NFP_PORT_CHANGED, &port->flags);
  573. nfp_err(cpp, "Error refreshing port state table!\n");
  574. return -EIO;
  575. }
  576. ret = nfp_net_eth_port_update(cpp, port, eth_table);
  577. kfree(eth_table);
  578. return ret;
  579. }
  580. /*
  581. * PCI device functions
  582. */
/* Main PF probe: map memory, validate the FW ABI, set up app,
 * devlink, vNICs and IRQs, then start everything.  Errors unwind in
 * reverse order of setup.
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	/* Without a symbol table none of the FW resources can be found */
	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	/* A negative value is an error code from the rtsym read */
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = devlink_register(devlink, &pf->pdev->dev);
	if (err)
		goto err_app_clean;

	mutex_lock(&pf->lock);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	mutex_unlock(&pf->lock);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	mutex_unlock(&pf->lock);
	cancel_work_sync(&pf->port_refresh_work);
	devlink_unregister(devlink);
err_app_clean:
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}
/* Tear down everything set up by nfp_net_pci_probe(). */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	mutex_lock(&pf->lock);
	/* Clean and free all data vNICs before stopping the app */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	mutex_unlock(&pf->lock);

	devlink_unregister(priv_to_devlink(pf));

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	cancel_work_sync(&pf->port_refresh_work);
}