/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_netvf_main.c
 * Netronome virtual function network device driver: Main entry point
 * Author: Jason McMullan <jason.mcmullan@netronome.com>
 *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/etherdevice.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

/**
 * struct nfp_net_vf - NFP VF-specific device structure
 * @nn:          NFP Net structure for this device
 * @irq_entries: Pre-allocated array of MSI-X entries
 * @q_bar:       Pointer to mapped QC memory (NULL if TX/RX mapped directly)
 * @ddir:        Per-device debugfs directory
 */
struct nfp_net_vf {
	struct nfp_net *nn;

	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
				      NFP_NET_MAX_TX_RINGS];
	u8 __iomem *q_bar;

	struct dentry *ddir;
};

static const char nfp_net_driver_name[] = "nfp_netvf";

#define PCI_DEVICE_NFP6000VF	0x6003

static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);
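
/* Read the VF's MAC address from the control BAR. The address is
 * stored big-endian at NFP_NET_CFG_MACADDR: the first four octets at
 * offset +0 and the last two at offset +6 (note the 2-byte gap), which
 * is why the reads below use those two offsets. If firmware reports no
 * valid address, fall back to a random locally administered one.
 */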
static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
{
	u8 mac_addr[ETH_ALEN];

	put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
	put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);

	if (!is_valid_ether_addr(mac_addr)) {
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
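
/* Probe sequence: enable the PCI device, map the control BAR, check
 * the firmware ABI version, size and map the queue controller BAR(s),
 * then set up the netdev, MSI-X vectors, and debugfs entries. Each
 * failure path jumps to the matching err_* label to unwind the steps
 * already completed.
 */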
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *pci_id)
{
	struct nfp_net_fw_version fw_ver;
	int max_tx_rings, max_rx_rings;
	u32 tx_bar_off, rx_bar_off;
	u32 tx_bar_sz, rx_bar_sz;
	int tx_bar_no, rx_bar_no;
	struct nfp_net_vf *vf;
	unsigned int num_irqs;
	u8 __iomem *ctrl_bar;
	struct nfp_net *nn;
	u32 startq;
	int stride;
	int err;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;
	pci_set_drvdata(pdev, vf);

	err = pci_enable_device_mem(pdev);
	if (err)
		goto err_free_vf;

	err = pci_request_regions(pdev, nfp_net_driver_name);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate device memory.\n");
		goto err_pci_disable;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
	if (err)
		goto err_pci_regions;

	/* Map the Control BAR.
	 *
	 * Irrespective of the advertised BAR size we only map the
	 * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
	 * identical for PF and VF drivers.
	 */
	ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
				   NFP_NET_CFG_BAR_SZ);
	if (!ctrl_bar) {
		dev_err(&pdev->dev,
			"Failed to map resource %d\n", NFP_NET_CTRL_BAR);
		err = -EIO;
		goto err_pci_regions;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}
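
	/* Each ring's queue controller pointers occupy
	 * stride * NFP_QCP_QUEUE_ADDR_SZ bytes of queue BAR space.
	 * The obsolete 0.0.0.1 ABI exposes TX and RX queues in separate
	 * BARs (Q0/Q1) with a stride of 2; ABI majors 1-4 place both
	 * queue types in a single BAR with a stride of 4.
	 */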
	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		tx_bar_no = NFP_NET_Q0_BAR;
		rx_bar_no = NFP_NET_Q1_BAR;
		dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			tx_bar_no = NFP_NET_Q0_BAR;
			rx_bar_no = tx_bar_no;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find out how many rings are supported */
	max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
	rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

	/* Sanity checks */
	if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
		dev_err(&pdev->dev,
			"TX BAR too small for number of TX rings. Adjusting\n");
		tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
		max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
	if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
		dev_err(&pdev->dev,
			"RX BAR too small for number of RX rings. Adjusting\n");
		rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
		max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
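
	/* The control BAR reports the index of the first TX/RX queue
	 * assigned to this VF; NFP_PCIE_QUEUE() converts a queue index
	 * into its byte offset within the queue controller BAR.
	 */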
	startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar_off = NFP_PCIE_QUEUE(startq);
	startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar_off = NFP_PCIE_QUEUE(startq);

	/* Allocate and initialise the netdev */
	nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings);
	if (IS_ERR(nn)) {
		err = PTR_ERR(nn);
		goto err_ctrl_unmap;
	}
	vf->nn = nn;

	nn->fw_ver = fw_ver;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->dp.is_vf = 1;
	nn->stride_tx = stride;
	nn->stride_rx = stride;
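
	/* With a shared queue BAR the TX and RX ranges may overlap, so
	 * map the union of the two ranges once and derive tx_bar/rx_bar
	 * as offsets into that single mapping; with separate BARs each
	 * range is mapped on its own.
	 */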
	if (rx_bar_no == tx_bar_no) {
		u32 bar_off, bar_sz;
		resource_size_t map_addr;

		/* Make a single overlapping BAR mapping */
		if (tx_bar_off < rx_bar_off)
			bar_off = tx_bar_off;
		else
			bar_off = rx_bar_off;

		if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
			bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
		else
			bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

		map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
		vf->q_bar = ioremap_nocache(map_addr, bar_sz);
		if (!vf->q_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* TX queues */
		nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
		/* RX queues */
		nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
	} else {
		resource_size_t map_addr;

		/* TX queues */
		map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
		nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
		if (!nn->tx_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* RX queues */
		map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
		nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
		if (!nn->rx_bar) {
			nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
			err = -EIO;
			goto err_unmap_tx;
		}
	}

	nfp_netvf_get_mac_addr(nn);
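
	/* Request one vector per ring vector plus the non-queue (link
	 * state change and exception) vectors; nfp_net_irqs_alloc() may
	 * grant fewer, but no fewer than NFP_NET_MIN_PORT_IRQS.
	 */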
	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
				      NFP_NET_MIN_PORT_IRQS,
				      NFP_NET_NON_Q_VECTORS +
				      nn->dp.num_r_vecs);
	if (!num_irqs) {
		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -EIO;
		goto err_unmap_rx;
	}
	nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_netdev_init(nn->dp.netdev);
	if (err)
		goto err_irqs_disable;

	nfp_net_info(nn);
	vf->ddir = nfp_net_debugfs_device_add(pdev);
	nfp_net_debugfs_port_add(nn, vf->ddir, 0);

	return 0;
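
	/* Unwind in reverse order of setup. A non-NULL q_bar means the
	 * single overlapping queue mapping was used, so tx_bar/rx_bar
	 * are only offsets into it and q_bar itself is what gets
	 * unmapped.
	 */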
err_irqs_disable:
	nfp_net_irqs_disable(pdev);
err_unmap_rx:
	if (!vf->q_bar)
		iounmap(nn->rx_bar);
err_unmap_tx:
	if (!vf->q_bar)
		iounmap(nn->tx_bar);
	else
		iounmap(vf->q_bar);
err_netdev_free:
	nfp_net_netdev_free(nn);
err_ctrl_unmap:
	iounmap(ctrl_bar);
err_pci_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);
err_free_vf:
	pci_set_drvdata(pdev, NULL);
	kfree(vf);
	return err;
}

static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
	struct nfp_net_vf *vf = pci_get_drvdata(pdev);
	struct nfp_net *nn = vf->nn;

	/* Note, the order is slightly different from above as we need
	 * to keep the nn pointer around until we have freed everything.
	 */
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_debugfs_dir_clean(&vf->ddir);

	nfp_net_netdev_clean(nn->dp.netdev);

	nfp_net_irqs_disable(pdev);

	if (!vf->q_bar) {
		iounmap(nn->rx_bar);
		iounmap(nn->tx_bar);
	} else {
		iounmap(vf->q_bar);
	}
	iounmap(nn->dp.ctrl_bar);

	nfp_net_netdev_free(nn);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);
	kfree(vf);
}

struct pci_driver nfp_netvf_pci_driver = {
	.name = nfp_net_driver_name,
	.id_table = nfp_netvf_pci_device_ids,
	.probe = nfp_netvf_pci_probe,
	.remove = nfp_netvf_pci_remove,
};