usnic_ib_main.c

/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */
#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);
/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];
	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}
/* Start of netdev section */
static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
{
	const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
		"NETDEV_REBOOT", "NETDEV_CHANGE",
		"NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
		"NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
		"NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
		"NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
		"NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
		"NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
	};

	if (event >= ARRAY_SIZE(event2str))
		return "UNKNOWN_NETDEV_EVENT";
	else
		return event2str[event];
}
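
/*
 * Force every QP group that is still usable (INIT, RTR or RTS) into the
 * ERR state. Called with usdev_lock held whenever the underlying netdev
 * loses link, changes address/MTU, or the PF resets.
 */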
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string(cur_state),
						usnic_ib_qp_grp_state_to_string(IB_QPS_ERR));
				}
			}
		}
	}
}
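
/*
 * Translate a netdev notifier event on the PF's net_device into the
 * matching forwarding-device update and IB port/GID event, which is then
 * delivered to consumers through ib_dispatch_event().
 */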
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	mutex_lock(&us_ibdev->usdev_lock);

	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					usnic_ib_netdev_event_to_string(event),
					us_ibdev->ib_dev.name);
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
					us_ibdev->ib_dev.name);
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}
		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
					us_ibdev->ib_dev.name);
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}
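
/*
 * Netdevice notifier callback: find the registered usnic ibdev that owns
 * the affected net_device (if any) and hand the event to
 * usnic_ib_handle_usdev_event().
 */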
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
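/*
 * Mirror IPv4 address changes on the PF's netdev into the forwarding
 * device and report them to IB consumers as GID change events.
 */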
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				usnic_ib_netdev_event_to_string(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				usnic_ib_netdev_event_to_string(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */
  287. static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
  288. struct ib_port_immutable *immutable)
  289. {
  290. struct ib_port_attr attr;
  291. int err;
  292. err = usnic_ib_query_port(ibdev, port_num, &attr);
  293. if (err)
  294. return err;
  295. immutable->pkey_tbl_len = attr.pkey_tbl_len;
  296. immutable->gid_tbl_len = attr.gid_tbl_len;
  297. return 0;
  298. }
  299. static void usnic_get_dev_fw_str(struct ib_device *device,
  300. char *str,
  301. size_t str_len)
  302. {
  303. struct usnic_ib_dev *us_ibdev =
  304. container_of(device, struct usnic_ib_dev, ib_dev);
  305. struct ethtool_drvinfo info;
  306. mutex_lock(&us_ibdev->usdev_lock);
  307. us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
  308. mutex_unlock(&us_ibdev->usdev_lock);
  309. snprintf(str, str_len, "%s", info.fw_version);
  310. }
  311. /* Start of PF discovery section */
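/*
 * Allocate and register one usnic IB device for a PF: set up the
 * forwarding device, verbs entry points and node GUID, then publish the
 * device through ib_register_device().
 */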
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_ifaddr *in;
	struct net_device *netdev;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dma_device = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
	us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str;

	if (ib_register_device(&us_ibdev->ib_dev, NULL))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
	if (in != NULL)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
			us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocating device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}
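
/*
 * kref release callback for a PF's vf_cnt: drops the PF from the global
 * ibdev list and removes its IB device once the last VF reference is gone.
 */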
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			usnic_ib_device_remove(us_ibdev);
			found = true;
			break;
		}
	}

	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));

	mutex_unlock(&usnic_ib_ibdev_list_lock);
}
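
/*
 * Look up the IB device for the physical function behind a VF's vnic,
 * creating and registering it on first use; each caller takes a vf_cnt
 * reference that is later released via usnic_ib_undiscover_pf().
 */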
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};
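
/*
 * Probe one usNIC virtual function: enable the PCI device, allocate its
 * vnic, attach it to (or create) the parent PF's IB device, and record
 * the PF's per-resource maximums.
 */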
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be the same for each VF, easier to re-write
	 * than to say "if (!set) { set_values(); set = 1; }").
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}
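
/*
 * Undo usnic_ib_pci_probe() for one VF; dropping the vf_cnt reference may
 * also tear down the parent PF via usnic_ib_undiscover_pf().
 */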
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
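/*
 * Module init: bring up the uiom memory-registration layer, register the
 * PCI driver and the netdev/inetaddr notifiers, then initialize the
 * transport layer and debugfs; each step is unwound in reverse on failure.
 */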
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	usnic_uiom_fini();

	return err;
}
static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */