/* nfp_net_main.c -- page-scrape chrome (file size and line-number runs) removed */
  1. /*
  2. * Copyright (C) 2015-2017 Netronome Systems, Inc.
  3. *
  4. * This software is dual licensed under the GNU General License Version 2,
  5. * June 1991 as shown in the file COPYING in the top-level directory of this
  6. * source tree or the BSD 2-Clause License provided below. You have the
  7. * option to license this software under the complete terms of either license.
  8. *
  9. * The BSD 2-Clause License:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * 1. Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * 2. Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. /*
  34. * nfp_net_main.c
  35. * Netronome network device driver: Main entry point
  36. * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
  37. * Alejandro Lucero <alejandro.lucero@netronome.com>
  38. * Jason McMullan <jason.mcmullan@netronome.com>
  39. * Rolf Neugebauer <rolf.neugebauer@netronome.com>
  40. */
  41. #include <linux/etherdevice.h>
  42. #include <linux/kernel.h>
  43. #include <linux/init.h>
  44. #include <linux/lockdep.h>
  45. #include <linux/pci.h>
  46. #include <linux/pci_regs.h>
  47. #include <linux/msi.h>
  48. #include <linux/random.h>
  49. #include <linux/rtnetlink.h>
  50. #include "nfpcore/nfp.h"
  51. #include "nfpcore/nfp_cpp.h"
  52. #include "nfpcore/nfp_nffw.h"
  53. #include "nfpcore/nfp_nsp.h"
  54. #include "nfpcore/nfp6000_pcie.h"
  55. #include "nfp_app.h"
  56. #include "nfp_net_ctrl.h"
  57. #include "nfp_net.h"
  58. #include "nfp_main.h"
  59. #include "nfp_port.h"
  60. #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
  61. static int nfp_is_ready(struct nfp_pf *pf)
  62. {
  63. const char *cp;
  64. long state;
  65. int err;
  66. cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
  67. if (!cp)
  68. return 0;
  69. err = kstrtol(cp, 0, &state);
  70. if (err < 0)
  71. return 0;
  72. return state == 15;
  73. }
  74. /**
  75. * nfp_net_get_mac_addr() - Get the MAC address.
  76. * @pf: NFP PF handle
  77. * @port: NFP port structure
  78. *
  79. * First try to get the MAC address from NSP ETH table. If that
  80. * fails generate a random address.
  81. */
  82. void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port)
  83. {
  84. struct nfp_eth_table_port *eth_port;
  85. eth_port = __nfp_port_get_eth_port(port);
  86. if (!eth_port) {
  87. eth_hw_addr_random(port->netdev);
  88. return;
  89. }
  90. ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
  91. ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
  92. }
  93. static struct nfp_eth_table_port *
  94. nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
  95. {
  96. int i;
  97. for (i = 0; eth_tbl && i < eth_tbl->count; i++)
  98. if (eth_tbl->ports[i].index == index)
  99. return &eth_tbl->ports[i];
  100. return NULL;
  101. }
  102. static int
  103. nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
  104. unsigned int default_val)
  105. {
  106. char name[256];
  107. int err = 0;
  108. u64 val;
  109. snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
  110. val = nfp_rtsym_read_le(pf->rtbl, name, &err);
  111. if (err) {
  112. if (err == -ENOENT)
  113. return default_val;
  114. nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
  115. return err;
  116. }
  117. return val;
  118. }
/* Number of data vNICs advertised by the firmware for this PF;
 * defaults to a single port when the symbol is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
/* App ID the firmware was built for; defaults to the core NIC app
 * when the symbol is absent.
 */
static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					      NFP_APP_CORE_NIC);
}
  128. static u8 __iomem *
  129. nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
  130. unsigned int min_size, struct nfp_cpp_area **area)
  131. {
  132. char pf_symbol[256];
  133. snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
  134. nfp_cppcore_pcie_unit(pf->cpp));
  135. return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
  136. }
/* Unlink @nn from the PF's vNIC list and release it, including its
 * associated port structure (nn->port may be NULL for the ctrl vNIC).
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
  144. static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
  145. {
  146. struct nfp_net *nn, *next;
  147. list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
  148. if (nfp_net_is_data_vnic(nn))
  149. nfp_net_pf_free_vnic(pf, nn);
  150. }
  151. static struct nfp_net *
  152. nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
  153. void __iomem *ctrl_bar, void __iomem *qc_bar,
  154. int stride, unsigned int id)
  155. {
  156. u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
  157. struct nfp_net *nn;
  158. int err;
  159. tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
  160. rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
  161. n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
  162. n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
  163. /* Allocate and initialise the vNIC */
  164. nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
  165. if (IS_ERR(nn))
  166. return nn;
  167. nn->app = pf->app;
  168. nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
  169. nn->dp.ctrl_bar = ctrl_bar;
  170. nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
  171. nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
  172. nn->dp.is_vf = 0;
  173. nn->stride_rx = stride;
  174. nn->stride_tx = stride;
  175. if (needs_netdev) {
  176. err = nfp_app_vnic_init(pf->app, nn, id);
  177. if (err) {
  178. nfp_net_free(nn);
  179. return ERR_PTR(err);
  180. }
  181. }
  182. pf->num_vnics++;
  183. list_add_tail(&nn->vnic_list, &pf->vnics);
  184. return nn;
  185. }
/* Finish bring-up of an allocated vNIC: core init, debugfs entry and
 * (when the vNIC has a port) devlink port registration.  On failure
 * everything done here is unwound before returning.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir, id);

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			goto err_dfs_clean;
	}

	nfp_net_info(nn);

	return 0;

err_dfs_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	return err;
}
/* Allocate one data vNIC per port, each taking the next CSR slice of
 * @ctrl_bar.  Fails with -ENODEV if no vNIC survived allocation.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Slice assignment is positional - advance even if this
		 * vNIC gets dropped below.
		 */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
/* Reverse of nfp_net_pf_init_vnic(), plus the app's per-vNIC cleanup.
 * Does not free the vNIC itself - see nfp_net_pf_free_vnic().
 */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	nfp_app_vnic_clean(pf->app, nn);
}
/* Allocate MSI-X vectors for all vNICs and distribute them.  Requests
 * the sum of every vNIC's wanted vectors, accepting as few as
 * NFP_NET_MIN_VNIC_IRQS per vNIC; whatever was granted is then spread
 * across the vNICs, each capped at its fair share of what remains.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		/* Take the smaller of what this vNIC wants and an even
		 * split (rounded up) of the still-unassigned vectors.
		 */
		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}
/* Disable MSI-X on the device and free the IRQ entry table allocated
 * by nfp_net_pf_alloc_irqs().
 */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
/* Run nfp_net_pf_init_vnic() on every data vNIC; on failure the
 * already-initialized ones are cleaned in reverse order.
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Walk back from the failed entry, skipping the ctrl vNIC */
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}
/* Allocate and initialize the app for this PF.  If the app requires a
 * control vNIC, map its ctrl BAR symbol and allocate the (netdev-less)
 * ctrl vNIC as well.  All steps are unwound on failure.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	err = nfp_app_init(pf->app);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
					NFP_PF_CSR_SLICE_SIZE,
					&pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	/* Control vNIC gets no netdev (needs_netdev = false) */
	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	nfp_app_clean(pf->app);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
  345. static void nfp_net_pf_app_clean(struct nfp_pf *pf)
  346. {
  347. if (pf->ctrl_vnic) {
  348. nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
  349. nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
  350. }
  351. nfp_app_clean(pf->app);
  352. nfp_app_free(pf->app);
  353. pf->app = NULL;
  354. }
  355. static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
  356. {
  357. int err;
  358. if (!pf->ctrl_vnic)
  359. return 0;
  360. err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
  361. if (err)
  362. return err;
  363. err = nfp_ctrl_open(pf->ctrl_vnic);
  364. if (err)
  365. goto err_clean_ctrl;
  366. return 0;
  367. err_clean_ctrl:
  368. nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
  369. return err;
  370. }
  371. static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
  372. {
  373. if (!pf->ctrl_vnic)
  374. return;
  375. nfp_ctrl_close(pf->ctrl_vnic);
  376. nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
  377. }
/* Bring up the ctrl vNIC, start the app, and re-enable SR-IOV for any
 * VFs that already exist.  Unwound in reverse order on failure.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
/* Reverse of nfp_net_pf_app_start(): SR-IOV off first, then the app,
 * then the ctrl vNIC.
 */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
/* Release every area mapped by nfp_net_pci_map_mem().  The VF config
 * and MAC stats BARs are optional and may never have been mapped.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
  415. static int nfp_net_pci_map_mem(struct nfp_pf *pf)
  416. {
  417. u8 __iomem *mem;
  418. u32 min_size;
  419. int err;
  420. min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
  421. mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
  422. min_size, &pf->data_vnic_bar);
  423. if (IS_ERR(mem)) {
  424. nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
  425. return PTR_ERR(mem);
  426. }
  427. min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
  428. pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
  429. "net.macstats", min_size,
  430. &pf->mac_stats_bar);
  431. if (IS_ERR(pf->mac_stats_mem)) {
  432. if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
  433. err = PTR_ERR(pf->mac_stats_mem);
  434. goto err_unmap_ctrl;
  435. }
  436. pf->mac_stats_mem = NULL;
  437. }
  438. pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
  439. "_pf%d_net_vf_bar",
  440. NFP_NET_CFG_BAR_SZ *
  441. pf->limit_vfs, &pf->vf_cfg_bar);
  442. if (IS_ERR(pf->vf_cfg_mem)) {
  443. if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
  444. err = PTR_ERR(pf->vf_cfg_mem);
  445. goto err_unmap_mac_stats;
  446. }
  447. pf->vf_cfg_mem = NULL;
  448. }
  449. mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
  450. NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
  451. &pf->qc_area);
  452. if (IS_ERR(mem)) {
  453. nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
  454. err = PTR_ERR(mem);
  455. goto err_unmap_vf_cfg;
  456. }
  457. return 0;
  458. err_unmap_vf_cfg:
  459. if (pf->vf_cfg_bar)
  460. nfp_cpp_area_release_free(pf->vf_cfg_bar);
  461. err_unmap_mac_stats:
  462. if (pf->mac_stats_bar)
  463. nfp_cpp_area_release_free(pf->mac_stats_bar);
  464. err_unmap_ctrl:
  465. nfp_cpp_area_release_free(pf->data_vnic_bar);
  466. return err;
  467. }
/* Final teardown of PF state once all data vNICs are gone: stop the
 * app, remove debugfs, free IRQs, clean the app and unmap all BARs.
 */
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_pf_free_irqs(pf);

	nfp_net_pf_app_clean(pf);

	nfp_net_pci_unmap_mem(pf);
}
/* Refresh @port's cached ETH table entry from a freshly read
 * @eth_table.  Marks the port invalid if FW reports its configuration
 * was overridden.  Returns -EIO (and flags the port as changed) when
 * the port is no longer present.  Caller must hold RTNL.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}
/* Re-read the NSP ETH table and synchronize all port state, removing
 * any vNICs whose ports became invalid.  If that leaves no vNICs the
 * PF-level teardown is finished here too.  Caller holds pf->lock.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed - flag every ETH port as changed so the
		 * stale cached state is visible.
		 */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	/* All vNICs gone - complete the PF teardown as well */
	if (list_empty(&pf->vnics))
		nfp_net_pci_remove_finish(pf);

	return 0;
}
  535. static void nfp_net_refresh_vnics(struct work_struct *work)
  536. {
  537. struct nfp_pf *pf = container_of(work, struct nfp_pf,
  538. port_refresh_work);
  539. mutex_lock(&pf->lock);
  540. nfp_net_refresh_port_table_sync(pf);
  541. mutex_unlock(&pf->lock);
  542. }
/* Flag @port as changed and schedule an asynchronous port table
 * refresh on the PF workqueue (runs nfp_net_refresh_vnics()).
 */
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}
  549. int nfp_net_refresh_eth_port(struct nfp_port *port)
  550. {
  551. struct nfp_cpp *cpp = port->app->cpp;
  552. struct nfp_eth_table *eth_table;
  553. int ret;
  554. clear_bit(NFP_PORT_CHANGED, &port->flags);
  555. eth_table = nfp_eth_read_ports(cpp);
  556. if (!eth_table) {
  557. set_bit(NFP_PORT_CHANGED, &port->flags);
  558. nfp_err(cpp, "Error refreshing port state table!\n");
  559. return -EIO;
  560. }
  561. ret = nfp_net_eth_port_update(cpp, port, eth_table);
  562. kfree(eth_table);
  563. return ret;
  564. }
/*
 * PCI device functions
 */
/**
 * nfp_net_pci_probe() - PF network-device probe
 * @pf: NFP PF handle
 *
 * Verifies the board and firmware are usable, maps all BARs, sets up
 * the app, allocates and initializes the data vNICs and their IRQs.
 * Everything is unwound in reverse order via the goto ladder on error.
 *
 * Return: 0 on success, negative errno on failure.
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	mutex_lock(&pf->lock);
	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	/* max_data_vnics is unsigned; cast back to int to detect an
	 * error code returned by the rtsym read
	 */
	if ((int)pf->max_data_vnics < 0) {
		err = pf->max_data_vnics;
		goto err_unlock;
	}

	err = nfp_net_pci_map_mem(pf);
	if (err)
		goto err_unlock;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	mutex_unlock(&pf->lock);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
err_unlock:
	mutex_unlock(&pf->lock);
	cancel_work_sync(&pf->port_refresh_work);
	return err;
}
/* PCI-level removal: clean and free all data vNICs, then finish the
 * PF teardown.  Serializes with the refresh worker via pf->lock and
 * cancels it afterwards.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->lock);
	/* Already torn down by nfp_net_refresh_port_table_sync()? */
	if (list_empty(&pf->vnics))
		goto out;

	list_for_each_entry(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);

	nfp_net_pf_free_vnics(pf);

	nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->lock);

	cancel_work_sync(&pf->port_refresh_work);
}