usnic_ib_verbs.c

/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
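
/*
 * Reinterpret the leading sizeof(u64) bytes of the firmware version
 * string as the u64 reported in ib_device_attr.fw_ver.  This is a raw
 * byte copy rather than a parse, so callers must pass a buffer of at
 * least eight bytes (the ethtool fw_version array used in query_device
 * below qualifies).
 */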
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = *((u64 *)fw_ver_str);
}
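
/*
 * Build the create_qp response consumed by the userspace provider: the
 * VF index, BAR0 bus address and length (used for a later mmap), the
 * vNIC indices of the RQ/WQ/CQ resources backing this qp_grp, and the
 * transport type of the default (first) flow.
 */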
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
			  qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
			  qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
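
/*
 * Select a VF with enough free vNIC resources for @res_spec and create a
 * qp_grp on it.  When VF sharing is enabled, VFs already attached to
 * this PD are tried first; otherwise only completely unused VFs are
 * considered.  Must be called with us_ibdev->usdev_lock held; the chosen
 * vf->lock is held across qp_grp creation and dropped before returning.
 */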
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
					  us_ibdev->ib_dev.name,
					  pci_name(usnic_vnic_get_pdev(vnic)));
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}

		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
			    usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
			   us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
					trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}
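
/*
 * Release a qp_grp under its owning VF's lock.  Callers must already
 * have moved the group back to IB_QPS_RESET.
 */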
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}
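
/*
 * Map an Ethernet link speed in Mb/s onto the nearest IB (speed, width)
 * pair, since ib_port_attr only speaks IB rates.  The pairings are
 * approximations: 10G reports as 1X FDR10, 40G as 4X FDR10, and
 * anything faster as 4X EDR.
 */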
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
				  u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}
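
/*
 * Sanity-check the transport type that userspace passed in the
 * create_qp command before it is used below to index
 * min_transport_spec[].
 */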
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
	    cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
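
/*
 * Device attributes are synthesized from the underlying netdev and the
 * per-VF resource counts: max_qp and max_cq scale with the number of
 * VFs, and sys_image_guid is derived from the interface MAC and IP
 * address.  Limits the userspace provider owns (max_qp_wr, max_sge,
 * max_sge_rd, max_cqe) are deliberately left at zero (see the comment
 * below).
 */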
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			    &gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
	       sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
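
/*
 * Port state is synthesized from the netdev: link down maps to
 * IB_PORT_DOWN, link up without an IP address to IB_PORT_INIT, and link
 * up with an address to IB_PORT_ACTIVE.  The raw phys_state values 3, 4
 * and 5 appear to follow the IB spec numbering (Disabled,
 * PortConfigurationTraining and LinkUp respectively).
 */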
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
			struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_link_ksettings cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	__ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
	/* props being zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
			      &props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask,
		      struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}
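
/*
 * The GID is synthesized from the interface MAC and IP address, using
 * the same encoding as sys_image_guid in query_device above; there is
 * no real GID table, so every accepted index returns the same value.
 */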
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
		       union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			    &gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
		   pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}
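
/*
 * Create a UD QP backed by VF resources.  The flow is: copy and
 * validate the transport spec from udata, size the resource spec (one
 * CQ if send and recv share a CQ, two otherwise), grab a VF through
 * find_free_vf_and_create_qp_grp(), and report the resulting resource
 * layout back to userspace, which drives the queues directly.
 */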
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
			  us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
			  us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
			  us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec, &res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
			  qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}
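
/*
 * Register a user memory region: usnic_uiom pins the pages and maps
 * them into the PD's IOMMU domain.  lkey/rkey are deliberately left at
 * zero, as usNIC's UD-only model has no use for protection keys.
 */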
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int access_flags,
			      struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
		  virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
				      access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
					    struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}
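
/*
 * Map a VF's BAR0 into userspace so the provider can drive the queues
 * directly.  vm_pgoff carries the VF index rather than a file offset,
 * and the request must cover exactly the whole BAR.
 */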
int usnic_ib_mmap(struct ib_ucontext *context,
		  struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
		  vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
					  bar->len,
					  vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
				  &bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);
			return remap_pfn_range(vma,
					       vma->vm_start,
					       bus_addr >> PAGE_SHIFT,
					       len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
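/*
 * usNIC is a kernel-bypass design: the data path (posting work
 * requests, polling completions, address handles) lives entirely in the
 * userspace provider.  These verbs exist only to satisfy the IB core
 * and fail if they are ever invoked.
 */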
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct ib_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
		     struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
			   enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */