
/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_netvf_main.c
 * Netronome virtual function network device driver: Main entry point
 * Author: Jason McMullan <jason.mcmullan@netronome.com>
 *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/etherdevice.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"

const char nfp_net_driver_name[] = "nfp_netvf";
const char nfp_net_driver_version[] = "0.1";

#define PCI_DEVICE_NFP6000VF 0x6003
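
/* Match the NFP6000 VF PCI device (0x6003) with a Netronome subsystem
 * vendor ID; subsystem device, class and class mask are wildcarded.
 */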
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
        { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
          PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
          PCI_ANY_ID, 0,
        },
        { 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);
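
/* Read the MAC address advertised in the vNIC control BAR (one 32-bit read
 * for the first four bytes, one 16-bit read for the last two) and program it
 * into the netdev.  Fall back to a random address if the firmware does not
 * provide a valid one.
 */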
static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
{
        u8 mac_addr[ETH_ALEN];

        put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
        put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);

        if (!is_valid_ether_addr(mac_addr)) {
                eth_hw_addr_random(nn->netdev);
                return;
        }

        ether_addr_copy(nn->netdev->dev_addr, mac_addr);
        ether_addr_copy(nn->netdev->perm_addr, mac_addr);
}
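
/* Probe: enable the PCI device, map the control BAR, validate the firmware
 * ABI, size and map the TX/RX queue BARs, then allocate the netdev, MSI-X
 * vectors and register with the networking stack.
 */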
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *pci_id)
{
        struct nfp_net_fw_version fw_ver;
        int max_tx_rings, max_rx_rings;
        u32 tx_bar_off, rx_bar_off;
        u32 tx_bar_sz, rx_bar_sz;
        int tx_bar_no, rx_bar_no;
        u8 __iomem *ctrl_bar;
        struct nfp_net *nn;
        u32 startq;
        int stride;
        int err;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        err = pci_request_regions(pdev, nfp_net_driver_name);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate device memory.\n");
                goto err_pci_disable;
        }

        pci_set_master(pdev);

        err = dma_set_mask_and_coherent(&pdev->dev,
                                        DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
        if (err)
                goto err_pci_regions;

        /* Map the Control BAR.
         *
         * Irrespective of the advertised BAR size we only map the
         * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
         * identical for PF and VF drivers.
         */
        ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
                                   NFP_NET_CFG_BAR_SZ);
        if (!ctrl_bar) {
                dev_err(&pdev->dev,
                        "Failed to map resource %d\n", NFP_NET_CTRL_BAR);
                err = -EIO;
                goto err_pci_regions;
        }
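
        /* Only the generic firmware ABI class with a zero reserved field is
         * supported; reject anything else before configuring the device.
         */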
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
        if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
                err = -EINVAL;
                goto err_ctrl_unmap;
        }
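
        /* The queue stride and queue BAR layout depend on the firmware ABI:
         * the obsolete 0.0.0.1 ABI keeps TX and RX queues in separate BARs,
         * while ABI majors 1-4 place both in a single BAR.
         */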
        /* Determine stride */
        if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
                stride = 2;
                tx_bar_no = NFP_NET_Q0_BAR;
                rx_bar_no = NFP_NET_Q1_BAR;
                dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
        } else {
                switch (fw_ver.major) {
                case 1 ... 4:
                        stride = 4;
                        tx_bar_no = NFP_NET_Q0_BAR;
                        rx_bar_no = tx_bar_no;
                        break;
                default:
                        dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
                                fw_ver.resv, fw_ver.class,
                                fw_ver.major, fw_ver.minor);
                        err = -EINVAL;
                        goto err_ctrl_unmap;
                }
        }

        /* Find out how many rings are supported */
        max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
        max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

        tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
        rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

        /* Sanity checks */
        if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
                dev_err(&pdev->dev,
                        "TX BAR too small for number of TX rings. Adjusting\n");
                tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
                max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
        }
        if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
                dev_err(&pdev->dev,
                        "RX BAR too small for number of RX rings. Adjusting\n");
                rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
                max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
        }
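
        /* The control BAR advertises the index of the first TX/RX queue for
         * this vNIC; NFP_PCIE_QUEUE() converts a queue index into an offset
         * within the queue controller BAR.
         */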
        startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
        tx_bar_off = NFP_PCIE_QUEUE(startq);
        startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
        rx_bar_off = NFP_PCIE_QUEUE(startq);

        /* Allocate and initialise the netdev */
        nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings);
        if (IS_ERR(nn)) {
                err = PTR_ERR(nn);
                goto err_ctrl_unmap;
        }

        nn->fw_ver = fw_ver;
        nn->ctrl_bar = ctrl_bar;
        nn->is_vf = 1;
        nn->stride_tx = stride;
        nn->stride_rx = stride;
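
        /* Map the queue memory.  If TX and RX queues share a PCI BAR, create
         * a single mapping covering both ranges; otherwise map each queue
         * range separately.
         */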
        if (rx_bar_no == tx_bar_no) {
                u32 bar_off, bar_sz;
                resource_size_t map_addr;

                /* Make a single overlapping BAR mapping */
                if (tx_bar_off < rx_bar_off)
                        bar_off = tx_bar_off;
                else
                        bar_off = rx_bar_off;

                if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
                        bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
                else
                        bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

                map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
                nn->q_bar = ioremap_nocache(map_addr, bar_sz);
                if (!nn->q_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
                        goto err_netdev_free;
                }

                /* TX queues */
                nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off);
                /* RX queues */
                nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off);
        } else {
                resource_size_t map_addr;

                /* TX queues */
                map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
                nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
                if (!nn->tx_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
                        goto err_netdev_free;
                }

                /* RX queues */
                map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
                nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
                if (!nn->rx_bar) {
                        nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
                        err = -EIO;
                        goto err_unmap_tx;
                }
        }
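
        /* Program the MAC address and allocate MSI-X vectors.  The !err
         * check treats a zero return from nfp_net_irqs_alloc() (no vectors
         * obtained) as failure.
         */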
        nfp_netvf_get_mac_addr(nn);

        err = nfp_net_irqs_alloc(nn);
        if (!err) {
                nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
                err = -EIO;
                goto err_unmap_rx;
        }

        /* Get ME clock frequency from ctrl BAR
         * XXX for now frequency is hardcoded until we figure out how
         * to get the value from nfp-hwinfo into ctrl bar
         */
        nn->me_freq_mhz = 1200;

        err = nfp_net_netdev_init(nn->netdev);
        if (err)
                goto err_irqs_disable;

        pci_set_drvdata(pdev, nn);

        nfp_net_info(nn);
        nfp_net_debugfs_adapter_add(nn);

        return 0;
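
        /* Error unwind.  nn->q_bar is only set when TX and RX queues share a
         * BAR, so either the shared mapping or the individual TX/RX mappings
         * are unmapped, never both.
         */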
err_irqs_disable:
        nfp_net_irqs_disable(nn);
err_unmap_rx:
        if (!nn->q_bar)
                iounmap(nn->rx_bar);
err_unmap_tx:
        if (!nn->q_bar)
                iounmap(nn->tx_bar);
        else
                iounmap(nn->q_bar);
err_netdev_free:
        pci_set_drvdata(pdev, NULL);
        nfp_net_netdev_free(nn);
err_ctrl_unmap:
        iounmap(ctrl_bar);
err_pci_regions:
        pci_release_regions(pdev);
err_pci_disable:
        pci_disable_device(pdev);
        return err;
}

static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
        struct nfp_net *nn = pci_get_drvdata(pdev);

        /* Note, the order is slightly different from above as we need
         * to keep the nn pointer around till we have freed everything.
         */
        nfp_net_debugfs_adapter_del(nn);
        nfp_net_netdev_clean(nn->netdev);

        nfp_net_irqs_disable(nn);

        if (!nn->q_bar) {
                iounmap(nn->rx_bar);
                iounmap(nn->tx_bar);
        } else {
                iounmap(nn->q_bar);
        }
        iounmap(nn->ctrl_bar);

        pci_set_drvdata(pdev, NULL);

        nfp_net_netdev_free(nn);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static struct pci_driver nfp_netvf_pci_driver = {
        .name     = nfp_net_driver_name,
        .id_table = nfp_netvf_pci_device_ids,
        .probe    = nfp_netvf_pci_probe,
        .remove   = nfp_netvf_pci_remove,
};
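
/* Module init: create the debugfs root before registering the PCI driver and
 * remove it again if registration fails.
 */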
static int __init nfp_netvf_init(void)
{
        int err;

        pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n",
                nfp_net_driver_name);

        nfp_net_debugfs_create();
        err = pci_register_driver(&nfp_netvf_pci_driver);
        if (err) {
                nfp_net_debugfs_destroy();
                return err;
        }

        return 0;
}

static void __exit nfp_netvf_exit(void)
{
        pci_unregister_driver(&nfp_netvf_pci_driver);
        nfp_net_debugfs_destroy();
}

module_init(nfp_netvf_init);
module_exit(nfp_netvf_exit);

MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NFP VF network device driver");