vt.c

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include "vt.h"
#include "trace.h"

#define RVT_UVERBS_ABI_VERSION 2

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library");

static int rvt_init(void)
{
	/*
	 * rdmavt does not need to do anything special when it starts up. All it
	 * needs to do is sit and wait until a driver attempts registration.
	 */
	return 0;
}
module_init(rvt_init);

static void rvt_cleanup(void)
{
	/*
	 * Nothing to do at exit time either. The module won't be able to be
	 * removed until all drivers are gone, which means all the dev structs
	 * are gone, so there is really nothing to do.
	 */
}
module_exit(rvt_cleanup);

/**
 * rvt_alloc_device - allocate rdi
 * @size: how big of a structure to allocate
 * @nports: number of ports to allocate array slots for
 *
 * Use IB core device alloc to allocate space for the rdi which is assumed to be
 * inside of the ib_device. Any extra space that drivers require should be
 * included in size.
 *
 * We also allocate a port array based on the number of ports.
 *
 * Return: pointer to allocated rdi, or NULL on allocation failure
 */
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
	struct rvt_dev_info *rdi;

	rdi = (struct rvt_dev_info *)ib_alloc_device(size);
	if (!rdi)
		return rdi;

	rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
	if (!rdi->ports) {
		ib_dealloc_device(&rdi->ibdev);
		return NULL;
	}

	return rdi;
}
EXPORT_SYMBOL(rvt_alloc_device);
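
/*
 * Illustrative sketch, not part of this file: a driver would typically embed
 * struct rvt_dev_info at the start of its own device structure and pass the
 * full size here so any driver-private space is allocated in the same call.
 * The my_ names below are invented for the example.
 *
 *	struct my_ibdev {
 *		struct rvt_dev_info rdi;	// placed first on purpose
 *		int my_private_state;
 *	};
 *
 *	struct rvt_dev_info *rdi;
 *	struct my_ibdev *dd;
 *
 *	rdi = rvt_alloc_device(sizeof(*dd), nports);
 *	if (!rdi)
 *		return -ENOMEM;
 *	dd = container_of(rdi, struct my_ibdev, rdi);
 */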

static int rvt_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props,
			    struct ib_udata *uhw)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	/*
	 * Return rvt_dev_info.dparms.props contents
	 */
	*props = rdi->dparms.props;
	return 0;
}

static int rvt_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	/*
	 * There is currently no need to supply this based on qib and hfi1.
	 * Future drivers may need to implement this though.
	 */
	return -EOPNOTSUPP;
}

/**
 * rvt_query_port - Passes the query port call to the driver
 * @ibdev: Verbs IB dev
 * @port_num: port number, 1 based from ib core
 * @props: structure to hold returned properties
 *
 * Return: 0 on success
 */
static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
			  struct ib_port_attr *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int port_index = ibport_num_to_idx(ibdev, port_num);

	if (port_index < 0)
		return -EINVAL;

	rvp = rdi->ports[port_index];
	memset(props, 0, sizeof(*props));
	props->sm_lid = rvp->sm_lid;
	props->sm_sl = rvp->sm_sl;
	props->port_cap_flags = rvp->port_cap_flags;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = rvt_get_npkeys(rdi);
	props->bad_pkey_cntr = rvp->pkey_violations;
	props->qkey_viol_cntr = rvp->qkey_violations;
	props->subnet_timeout = rvp->subnet_timeout;
	props->init_type_reply = 0;

	/* Populate the remaining ib_port_attr elements */
	return rdi->driver_f.query_port_state(rdi, port_num, props);
}
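
/*
 * Illustrative sketch, not part of this file: rvt_query_port() above fills
 * the generic ib_port_attr fields and then delegates the hardware-specific
 * ones to the driver's query_port_state hook. A minimal hook, with invented
 * values, could look like this:
 *
 *	static int my_query_port_state(struct rvt_dev_info *rdi, u8 port_num,
 *				       struct ib_port_attr *props)
 *	{
 *		props->state = IB_PORT_ACTIVE;
 *		props->max_mtu = IB_MTU_4096;
 *		props->active_mtu = IB_MTU_4096;
 *		props->lid = 1;
 *		return 0;
 *	}
 */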

/**
 * rvt_modify_port - modify port properties
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @port_modify_mask: How to change the port
 * @props: Structure with the port modifications to apply
 *
 * Return: 0 on success
 */
static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int ret = 0;
	int port_index = ibport_num_to_idx(ibdev, port_num);

	if (port_index < 0)
		return -EINVAL;

	rvp = rdi->ports[port_index];
	rvp->port_cap_flags |= props->set_port_cap_mask;
	rvp->port_cap_flags &= ~props->clr_port_cap_mask;

	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		rdi->driver_f.cap_mask_chg(rdi, port_num);

	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ret = rdi->driver_f.shut_down_port(rdi, port_num);

	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		rvp->qkey_violations = 0;

	return ret;
}

/**
 * rvt_query_pkey - Return a pkey from the table at a given index
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @index: Index into pkey table
 * @pkey: Returned pkey
 *
 * Return: 0 on success, -EINVAL on an invalid port or index
 */
static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
			  u16 *pkey)
{
	/*
	 * Driver will be responsible for keeping rvt_dev_info.pkey_table up to
	 * date. This function will just return that value. There is no need to
	 * lock; if a stale value is read and sent to the user, so be it, there
	 * is no way to protect against that anyway.
	 */
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	int port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	if (index >= rvt_get_npkeys(rdi))
		return -EINVAL;

	*pkey = rvt_get_pkey(rdi, port_index, index);
	return 0;
}

/**
 * rvt_query_gid - Return a gid from the table
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @guid_index: Index in the GUID table
 * @gid: Gid to return
 *
 * Return: 0 on success
 */
static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
			 int guid_index, union ib_gid *gid)
{
	struct rvt_dev_info *rdi;
	struct rvt_ibport *rvp;
	int port_index;

	/*
	 * Driver is responsible for updating the guid table, which is used to
	 * craft the return value. This works similar to how query_pkey() is
	 * done.
	 */
	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	rdi = ib_to_rvt(ibdev);
	rvp = rdi->ports[port_index];
	gid->global.subnet_prefix = rvp->gid_prefix;

	return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
					 &gid->global.interface_id);
}

struct rvt_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct rvt_ucontext, ibucontext);
}

/**
 * rvt_alloc_ucontext - Allocate a user context
 * @ibdev: Verbs IB dev
 * @udata: User data allocated
 */
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct rvt_ucontext *context;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	return &context->ibucontext;
}

/**
 * rvt_dealloc_ucontext - Free a user context
 * @context: Free this
 */
static int rvt_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct ib_port_attr attr;
	int err, port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	err = rvt_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = rdi->dparms.core_cap_flags;
	immutable->max_mad_size = rdi->dparms.max_mad_size;

	return 0;
}

enum {
	MISC,
	QUERY_DEVICE,
	MODIFY_DEVICE,
	QUERY_PORT,
	MODIFY_PORT,
	QUERY_PKEY,
	QUERY_GID,
	ALLOC_UCONTEXT,
	DEALLOC_UCONTEXT,
	GET_PORT_IMMUTABLE,
	CREATE_QP,
	MODIFY_QP,
	DESTROY_QP,
	QUERY_QP,
	POST_SEND,
	POST_RECV,
	POST_SRQ_RECV,
	CREATE_AH,
	DESTROY_AH,
	MODIFY_AH,
	QUERY_AH,
	CREATE_SRQ,
	MODIFY_SRQ,
	DESTROY_SRQ,
	QUERY_SRQ,
	ATTACH_MCAST,
	DETACH_MCAST,
	GET_DMA_MR,
	REG_USER_MR,
	DEREG_MR,
	ALLOC_MR,
	ALLOC_FMR,
	MAP_PHYS_FMR,
	UNMAP_FMR,
	DEALLOC_FMR,
	MMAP,
	CREATE_CQ,
	DESTROY_CQ,
	POLL_CQ,
	REQ_NOTIFY_CQ,
	RESIZE_CQ,
	ALLOC_PD,
	DEALLOC_PD,
	_VERB_IDX_MAX /* Must always be last! */
};

static inline int check_driver_override(struct rvt_dev_info *rdi,
					size_t offset, void *func)
{
	if (!*(void **)((void *)&rdi->ibdev + offset)) {
		*(void **)((void *)&rdi->ibdev + offset) = func;
		return 0;
	}

	return 1;
}
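
/*
 * For illustration: with a driver that left ibdev.query_pkey NULL, the call
 *
 *	check_driver_override(rdi, offsetof(struct ib_device, query_pkey),
 *			      rvt_query_pkey);
 *
 * installs rvt_query_pkey into rdi->ibdev.query_pkey and returns 0. Had the
 * driver supplied its own query_pkey, that pointer would be kept and 1
 * returned. check_support() below uses a 0 return (rdmavt default installed)
 * as the cue to verify the driver_f helpers that the rdmavt implementation
 * depends on.
 */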

static noinline int check_support(struct rvt_dev_info *rdi, int verb)
{
	switch (verb) {
	case MISC:
		/*
		 * These functions are not part of verbs specifically but are
		 * required for rdmavt to function.
		 */
		if ((!rdi->driver_f.port_callback) ||
		    (!rdi->driver_f.get_card_name) ||
		    (!rdi->driver_f.get_pci_dev))
			return -EINVAL;
		break;

	case QUERY_DEVICE:
		check_driver_override(rdi,
				      offsetof(struct ib_device, query_device),
				      rvt_query_device);
		break;

	case MODIFY_DEVICE:
		/*
		 * rdmavt does not currently support modify device; drivers
		 * must provide it.
		 */
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    modify_device),
					   rvt_modify_device))
			return -EOPNOTSUPP;
		break;

	case QUERY_PORT:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    query_port),
					   rvt_query_port))
			if (!rdi->driver_f.query_port_state)
				return -EINVAL;
		break;

	case MODIFY_PORT:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    modify_port),
					   rvt_modify_port))
			if (!rdi->driver_f.cap_mask_chg ||
			    !rdi->driver_f.shut_down_port)
				return -EINVAL;
		break;

	case QUERY_PKEY:
		check_driver_override(rdi,
				      offsetof(struct ib_device, query_pkey),
				      rvt_query_pkey);
		break;

	case QUERY_GID:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    query_gid),
					   rvt_query_gid))
			if (!rdi->driver_f.get_guid_be)
				return -EINVAL;
		break;

	case ALLOC_UCONTEXT:
		check_driver_override(rdi,
				      offsetof(struct ib_device,
					       alloc_ucontext),
				      rvt_alloc_ucontext);
		break;

	case DEALLOC_UCONTEXT:
		check_driver_override(rdi,
				      offsetof(struct ib_device,
					       dealloc_ucontext),
				      rvt_dealloc_ucontext);
		break;

	case GET_PORT_IMMUTABLE:
		check_driver_override(rdi,
				      offsetof(struct ib_device,
					       get_port_immutable),
				      rvt_get_port_immutable);
		break;

	case CREATE_QP:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    create_qp),
					   rvt_create_qp))
			if (!rdi->driver_f.qp_priv_alloc ||
			    !rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case MODIFY_QP:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    modify_qp),
					   rvt_modify_qp))
			if (!rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.schedule_send ||
			    !rdi->driver_f.get_pmtu_from_attr ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp ||
			    !rdi->driver_f.notify_error_qp ||
			    !rdi->driver_f.mtu_from_qp ||
			    !rdi->driver_f.mtu_to_path_mtu ||
			    !rdi->driver_f.shut_down_port ||
			    !rdi->driver_f.cap_mask_chg)
				return -EINVAL;
		break;

	case DESTROY_QP:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    destroy_qp),
					   rvt_destroy_qp))
			if (!rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case QUERY_QP:
		check_driver_override(rdi,
				      offsetof(struct ib_device, query_qp),
				      rvt_query_qp);
		break;

	case POST_SEND:
		if (!check_driver_override(rdi,
					   offsetof(struct ib_device,
						    post_send),
					   rvt_post_send))
			if (!rdi->driver_f.schedule_send ||
			    !rdi->driver_f.do_send)
				return -EINVAL;
		break;

	case POST_RECV:
		check_driver_override(rdi,
				      offsetof(struct ib_device, post_recv),
				      rvt_post_recv);
		break;

	case POST_SRQ_RECV:
		check_driver_override(rdi,
				      offsetof(struct ib_device,
					       post_srq_recv),
				      rvt_post_srq_recv);
		break;

	case CREATE_AH:
		check_driver_override(rdi,
				      offsetof(struct ib_device, create_ah),
				      rvt_create_ah);
		break;

	case DESTROY_AH:
		check_driver_override(rdi,
				      offsetof(struct ib_device, destroy_ah),
				      rvt_destroy_ah);
		break;

	case MODIFY_AH:
		check_driver_override(rdi,
				      offsetof(struct ib_device, modify_ah),
				      rvt_modify_ah);
		break;

	case QUERY_AH:
		check_driver_override(rdi,
				      offsetof(struct ib_device, query_ah),
				      rvt_query_ah);
		break;

	case CREATE_SRQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, create_srq),
				      rvt_create_srq);
		break;

	case MODIFY_SRQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, modify_srq),
				      rvt_modify_srq);
		break;

	case DESTROY_SRQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, destroy_srq),
				      rvt_destroy_srq);
		break;

	case QUERY_SRQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, query_srq),
				      rvt_query_srq);
		break;

	case ATTACH_MCAST:
		check_driver_override(rdi,
				      offsetof(struct ib_device, attach_mcast),
				      rvt_attach_mcast);
		break;

	case DETACH_MCAST:
		check_driver_override(rdi,
				      offsetof(struct ib_device, detach_mcast),
				      rvt_detach_mcast);
		break;

	case GET_DMA_MR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, get_dma_mr),
				      rvt_get_dma_mr);
		break;

	case REG_USER_MR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, reg_user_mr),
				      rvt_reg_user_mr);
		break;

	case DEREG_MR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, dereg_mr),
				      rvt_dereg_mr);
		break;

	case ALLOC_FMR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, alloc_fmr),
				      rvt_alloc_fmr);
		break;

	case ALLOC_MR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, alloc_mr),
				      rvt_alloc_mr);
		break;

	case MAP_PHYS_FMR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, map_phys_fmr),
				      rvt_map_phys_fmr);
		break;

	case UNMAP_FMR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, unmap_fmr),
				      rvt_unmap_fmr);
		break;

	case DEALLOC_FMR:
		check_driver_override(rdi,
				      offsetof(struct ib_device, dealloc_fmr),
				      rvt_dealloc_fmr);
		break;

	case MMAP:
		check_driver_override(rdi,
				      offsetof(struct ib_device, mmap),
				      rvt_mmap);
		break;

	case CREATE_CQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, create_cq),
				      rvt_create_cq);
		break;

	case DESTROY_CQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, destroy_cq),
				      rvt_destroy_cq);
		break;

	case POLL_CQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, poll_cq),
				      rvt_poll_cq);
		break;

	case REQ_NOTIFY_CQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device,
					       req_notify_cq),
				      rvt_req_notify_cq);
		break;

	case RESIZE_CQ:
		check_driver_override(rdi,
				      offsetof(struct ib_device, resize_cq),
				      rvt_resize_cq);
		break;

	case ALLOC_PD:
		check_driver_override(rdi,
				      offsetof(struct ib_device, alloc_pd),
				      rvt_alloc_pd);
		break;

	case DEALLOC_PD:
		check_driver_override(rdi,
				      offsetof(struct ib_device, dealloc_pd),
				      rvt_dealloc_pd);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * rvt_register_device - register a driver
 * @rdi: main dev structure for all of rdmavt operations
 *
 * It is up to drivers to allocate the rdi and fill in the appropriate
 * information.
 *
 * Return: 0 on success, otherwise an errno.
 */
int rvt_register_device(struct rvt_dev_info *rdi)
{
	int ret = 0, i;

	if (!rdi)
		return -EINVAL;

	/*
	 * Check to ensure drivers have set up the required helpers for the
	 * verbs they want rdmavt to handle
	 */
	for (i = 0; i < _VERB_IDX_MAX; i++)
		if (check_support(rdi, i)) {
			pr_err("Driver support req not met at %d\n", i);
			return -EINVAL;
		}

	/* Once we get past here we can use rvt_pr macros and tracepoints */
	trace_rvt_dbg(rdi, "Driver attempting registration");
	rvt_mmap_init(rdi);

	/* Queue Pairs */
	ret = rvt_driver_qp_init(rdi);
	if (ret) {
		pr_err("Error in driver QP init.\n");
		return -EINVAL;
	}

	/* Address Handle */
	spin_lock_init(&rdi->n_ahs_lock);
	rdi->n_ahs_allocated = 0;

	/* Shared Receive Queue */
	rvt_driver_srq_init(rdi);

	/* Multicast */
	rvt_driver_mcast_init(rdi);

	/* Mem Region */
	ret = rvt_driver_mr_init(rdi);
	if (ret) {
		pr_err("Error in driver MR init.\n");
		goto bail_no_mr;
	}

	/* Completion queues */
	ret = rvt_driver_cq_init(rdi);
	if (ret) {
		pr_err("Error in driver CQ init.\n");
		goto bail_mr;
	}

	/* DMA Operations */
	rdi->ibdev.dma_ops =
		rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops;

	/* Protection Domain */
	spin_lock_init(&rdi->n_pds_lock);
	rdi->n_pds_allocated = 0;

	/*
	 * There are some things which could be set by underlying drivers but
	 * really should be up to rdmavt to set. For instance drivers can't
	 * know exactly which functions rdmavt supports, nor do they know the
	 * ABI version, so we do all of this sort of stuff here.
	 */
	rdi->ibdev.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION;
	rdi->ibdev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	rdi->ibdev.node_type = RDMA_NODE_IB_CA;
	rdi->ibdev.num_comp_vectors = 1;

	/* We are now good to announce we exist */
	ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
	if (ret) {
		rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
		goto bail_cq;
	}

	rvt_create_mad_agents(rdi);

	rvt_pr_info(rdi, "Registration with rdmavt done.\n");
	return ret;

bail_cq:
	rvt_cq_exit(rdi);

bail_mr:
	rvt_mr_exit(rdi);

bail_no_mr:
	rvt_qp_exit(rdi);

	return ret;
}
EXPORT_SYMBOL(rvt_register_device);
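
/*
 * Illustrative sketch, not part of this file, of the flow a driver might
 * follow before and during registration; the my_ names are invented:
 *
 *	rdi = rvt_alloc_device(sizeof(struct my_ibdev), 1);
 *	rdi->driver_f.port_callback = my_port_callback;
 *	rdi->driver_f.get_card_name = my_get_card_name;
 *	rdi->driver_f.get_pci_dev = my_get_pci_dev;
 *	rdi->dparms.props.max_qp = 1024;	// plus any other device attrs
 *
 *	rvt_init_port(rdi, &my_ibport, 0, my_pkey_table);
 *
 *	ret = rvt_register_device(rdi);
 *	if (ret)
 *		goto bail;
 */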

/**
 * rvt_unregister_device - remove a driver
 * @rdi: rvt dev struct
 */
void rvt_unregister_device(struct rvt_dev_info *rdi)
{
	if (!rdi)
		return;

	trace_rvt_dbg(rdi, "Driver is unregistering.");

	rvt_free_mad_agents(rdi);

	ib_unregister_device(&rdi->ibdev);

	rvt_cq_exit(rdi);
	rvt_mr_exit(rdi);
	rvt_qp_exit(rdi);
}
EXPORT_SYMBOL(rvt_unregister_device);

/**
 * rvt_init_port - init internal data for driver port
 * @rdi: rvt dev struct
 * @port: rvt port
 * @port_index: 0 based index of ports, different from IB core port num
 * @pkey_table: pkey table for this port
 *
 * Keep track of a list of ports. No need to have a detach port.
 * They persist until the driver goes away.
 *
 * Return: always 0
 */
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table)
{
	rdi->ports[port_index] = port;
	rdi->ports[port_index]->pkey_table = pkey_table;

	return 0;
}
EXPORT_SYMBOL(rvt_init_port);
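
/*
 * Illustrative sketch, not part of this file: a single-port driver might set
 * up its port before rvt_register_device() like this. The my_ names and the
 * table size are invented; index 0 conventionally holds the full default
 * pkey (0xffff):
 *
 *	static u16 my_pkey_table[MY_NUM_PKEYS] = { 0xffff };
 *	static struct rvt_ibport my_ibport;
 *
 *	rvt_init_port(rdi, &my_ibport, 0, my_pkey_table);
 */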