nfp_net_main.c

/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp_eth.h"
#include "nfpcore/nfp6000_pcie.h"

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
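
/* Check the "board.state" HWInfo key; a value of 15 indicates that the board
 * has completed initialization and is ready for NIC operation.
 */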
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

	return state == 15;
}

/**
 * nfp_net_map_area() - Helper function to map an area
 * @cpp:    NFP CPP handle
 * @name:   Name for the area
 * @isl:    CPP island
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function is primarily meant to simplify the code in the main probe
 * function. To undo its effect, call nfp_cpp_area_release_free(*area).
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}
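
/* Look up the port MAC address in the "eth%d.mac" HWInfo key; fall back to a
 * randomly generated address if the key is missing or cannot be parsed.
 */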
static void
nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
			    unsigned int id)
{
	u8 mac_addr[ETH_ALEN];
	const char *mac_str;
	char name[32];

	snprintf(name, sizeof(name), "eth%d.mac", id);

	mac_str = nfp_hwinfo_lookup(cpp, name);
	if (!mac_str) {
		dev_warn(&nn->pdev->dev,
			 "Can't look up MAC address, generating a random one\n");
		eth_hw_addr_random(nn->netdev);
		return;
	}

	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		dev_warn(&nn->pdev->dev,
			 "Can't parse MAC address (%s), generating a random one\n",
			 mac_str);
		eth_hw_addr_random(nn->netdev);
		return;
	}

	ether_addr_copy(nn->netdev->dev_addr, mac_addr);
	ether_addr_copy(nn->netdev->perm_addr, mac_addr);
}

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @nn:  NFP Network structure
 * @pf:  NFP PF device structure
 * @id:  NFP port id
 *
 * First try to get the MAC address from the NSP ETH table. If that fails,
 * try HWInfo. As a last resort generate a random address.
 */
static void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
{
	int i;

	for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
		if (pf->eth_tbl->ports[i].eth_index == id) {
			const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;

			ether_addr_copy(nn->netdev->dev_addr, mac_addr);
			ether_addr_copy(nn->netdev->perm_addr, mac_addr);
			return;
		}

	nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
}
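
/* Read the port count for this PF from the firmware's
 * "nfd_cfg_pf%d_num_ports" run-time symbol; default to a single port if the
 * symbol does not exist.
 */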
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	char name[256];
	u16 interface;
	int pcie_pf;
	int err = 0;
	u64 val;

	interface = nfp_cpp_interface(pf->cpp);
	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);

	snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);

	val = nfp_rtsym_read_le(pf->cpp, name, &err);
	/* Default to one port */
	if (err) {
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read adapter port count\n");
		val = 1;
	}

	return val;
}
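
/* Work out how many queue controller (QC) structs need to be mapped for the
 * PF by walking each port's control BAR slice and measuring the span from the
 * lowest to the highest queue index in use.
 */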
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler only accept configurations where
		 * queues are allocated to PFs in order (queues of PFn all have
		 * indexes lower than those of PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}
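
/* Map the PF control BAR by looking up the firmware's "_pf%d_net_bar0"
 * run-time symbol and mapping the CPP area it describes.
 */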
static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
	const struct nfp_rtsym *ctrl_sym;
	u8 __iomem *ctrl_bar;
	char pf_symbol[256];
	u16 interface;
	int pcie_pf;

	interface = nfp_cpp_interface(pf->cpp);
	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);

	snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);

	ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
	if (!ctrl_sym) {
		dev_err(&pf->pdev->dev,
			"Failed to find PF BAR0 symbol %s\n", pf_symbol);
		return NULL;
	}

	if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
		dev_err(&pf->pdev->dev,
			"PF BAR0 too small to contain %d ports\n",
			pf->num_ports);
		return NULL;
	}

	ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
				    ctrl_sym->domain, ctrl_sym->target,
				    ctrl_sym->addr, ctrl_sym->size,
				    &pf->ctrl_area);
	if (IS_ERR(ctrl_bar)) {
		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
			PTR_ERR(ctrl_bar));
		return NULL;
	}

	return ctrl_bar;
}

static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	while (!list_empty(&pf->ports)) {
		nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
		list_del(&nn->port_list);

		nfp_net_netdev_free(nn);
	}
}
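
/* Allocate a netdev for a single port and record its BAR pointers, queue
 * strides and firmware version.
 */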
static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
			     void __iomem *tx_bar, void __iomem *rx_bar,
			     int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 n_tx_rings, n_rx_rings;
	struct nfp_net *nn;

	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the netdev */
	nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->cpp = pf->cpp;
	nn->fw_ver = *fw_ver;
	nn->ctrl_bar = ctrl_bar;
	nn->tx_bar = tx_bar;
	nn->rx_bar = rx_bar;
	nn->is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	return nn;
}

static int
nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
			    unsigned int id)
{
	int err;

	/* Get MAC address */
	nfp_net_get_mac_addr(nn, pf, id);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_netdev_init(nn->netdev);
	if (err)
		return err;

	nfp_net_debugfs_port_add(nn, pf->ddir, id);

	nfp_net_info(nn);

	return 0;
}
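
/* Allocate one netdev per port. The TX/RX queue pointers for each port are
 * computed by offsetting from the previous port's queue base within the
 * already-mapped queue areas.
 */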
static int
nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
			 void __iomem *tx_bar, void __iomem *rx_bar,
			 int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
	struct nfp_net *nn;
	unsigned int i;
	int err;

	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);

	for (i = 0; i < pf->num_ports; i++) {
		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		prev_tx_base = tgt_tx_base;
		prev_rx_base = tgt_rx_base;

		nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
						  stride, fw_ver);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}
		list_add_tail(&nn->port_list, &pf->ports);

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return 0;

err_free_prev:
	nfp_net_pf_free_netdevs(pf);
	return err;
}
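
/* Allocate the per-port netdevs, distribute the available MSI-X vectors
 * across them and finish netdev initialization and registration.
 */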
static int
nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
			 void __iomem *ctrl_bar, void __iomem *tx_bar,
			 void __iomem *rx_bar, int stride,
			 struct nfp_net_fw_version *fw_ver)
{
	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
	struct nfp_net *nn;
	int err;

	/* Allocate the netdevs and do basic init */
	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, fw_ver);
	if (err)
		return err;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->ports, port_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries) {
		err = -ENOMEM;
		goto err_nn_free;
	}

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_PORT_IRQS * pf->num_ports,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		err = -ENOMEM;
		goto err_vec_free;
	}

	/* Distribute IRQs to ports */
	irqs_left = num_irqs;
	ports_left = pf->num_ports;
	list_for_each_entry(nn, &pf->ports, port_list) {
		unsigned int n;

		n = DIV_ROUND_UP(irqs_left, ports_left);
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		ports_left--;
	}

	/* Finish netdev init and register */
	id = 0;
	list_for_each_entry(nn, &pf->ports, port_list) {
		err = nfp_net_pf_init_port_netdev(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->netdev);
	}
	nfp_net_irqs_disable(pf->pdev);
err_vec_free:
	kfree(pf->irq_entries);
err_nn_free:
	nfp_net_pf_free_netdevs(pf);
	return err;
}

/*
 * PCI device functions
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
	u32 total_tx_qcs, total_rx_qcs;
	struct nfp_net_fw_version fw_ver;
	u32 tx_area_sz, rx_area_sz;
	u32 start_q;
	int stride;
	int err;

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	pf->num_ports = nfp_net_pf_get_num_ports(pf);

	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
	if (!ctrl_bar)
		return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find how many QC structs need to be mapped */
	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_TXQ,
					    NFP_NET_CFG_MAX_TXRINGS);
	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_RXQ,
					    NFP_NET_CFG_MAX_RXRINGS);
	if (!total_tx_qcs || !total_rx_qcs) {
		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
			total_tx_qcs, total_rx_qcs);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;

	/* Map TX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  tx_area_sz, &pf->tx_area);
	if (IS_ERR(tx_bar)) {
		nfp_err(pf->cpp, "Failed to map TX area.\n");
		err = PTR_ERR(tx_bar);
		goto err_ctrl_unmap;
	}

	/* Map RX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  rx_area_sz, &pf->rx_area);
	if (IS_ERR(rx_bar)) {
		nfp_err(pf->cpp, "Failed to map RX area.\n");
		err = PTR_ERR(rx_bar);
		goto err_unmap_tx;
	}

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, &fw_ver);
	if (err)
		goto err_clean_ddir;

	return 0;

err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
	nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->ctrl_area);
	return err;
}
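
/* Undo everything done in nfp_net_pci_probe(): clean up and free the port
 * netdevs, remove the debugfs directory, release the interrupt vectors and
 * unmap the control and queue areas.
 */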
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
}