usnic_ib_verbs.c

/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

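/*
 * Encode the firmware version for ib_device_attr.fw_ver.  As written this
 * captures only the first character of the ethtool version string; it is a
 * placeholder encoding rather than a full string pack.
 */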
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = (u64) *fw_ver_str;
}

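/*
 * Fill struct usnic_ib_create_qp_resp with everything userspace needs to
 * drive the QP directly: the VF index, BAR0 bus address and length, the
 * vNIC indices of the RQ/WQ/CQ resource chunks, and the transport type of
 * the default flow.  The response is copied back through @udata; returns 0
 * or a negative errno.
 */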
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}

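/*
 * Pick a VF with room for @res_spec and create a QP group on it.  When
 * usnic_ib_share_vf is set, VFs already registered with this PD are tried
 * first; otherwise (or on a miss) an idle VF is used.  Must be called with
 * us_ibdev->usdev_lock held; the chosen vf->lock is held across
 * usnic_ib_qp_grp_create() so the vNIC's free resources cannot change
 * underneath us.
 */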
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
					trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}

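/*
 * Release a QP group back to its VF.  The group must already be in RESET;
 * vf->lock serializes teardown against the allocation path above.
 */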
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

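/*
 * Advertise the Ethernet link rate (in Mb/s, as reported by ethtool) as
 * the nearest IB width/speed pair; anything above 40G is reported as
 * 4X EDR.
 */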
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
					u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

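/*
 * Report device attributes.  Most limits are derived from the per-VF
 * resource counts scaled by the number of VFs; attributes owned by the
 * userspace provider (max_qp_wr, max_sge, max_sge_rd, max_cqe) are left
 * at zero, and uhw must be empty because no vendor extension is defined
 * for this query.
 */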
int usnic_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	struct ethtool_cmd cmd;
	int qp_per_vf;

	usnic_dbg("\n");

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by userspace: max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_cmd cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;	/* Disabled */
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;	/* PortConfigurationTraining */
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* LinkUp */
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

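/*
 * Create a UD QP for a userspace process: copy in and validate the
 * transport spec, claim a VF with enough vNIC resources under usdev_lock,
 * and link the resulting QP group to the ucontext once the create
 * response (VF, BAR and queue indices) has been copied back to userspace.
 */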
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec, &res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

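/*
 * Only the IB_QP_STATE transitions to INIT, RTR and RTS are implemented;
 * everything else is rejected with -EINVAL.  The transition work itself
 * is delegated to usnic_ib_qp_grp_modify().
 */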
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	/* TODO: Future Support All States */
	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
	} else {
		usnic_err("Unexpected combination mask: %u state: %u\n",
				attr_mask & IB_QP_STATE, attr->qp_state);
		status = -EINVAL;
	}

	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

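/*
 * Completion handling runs entirely in userspace against the mapped vNIC
 * CQ rings, so the kernel CQ object created here is only a placeholder
 * that is freed again in usnic_ib_destroy_cq().
 */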
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
					const struct ib_cq_init_attr *attr,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

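/*
 * Register a userspace memory region.  Pinning and IOMMU mapping are done
 * by usnic_uiom_reg_get(); lkey and rkey are reported as 0 since the
 * device does not use protection keys.
 */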
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

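/*
 * Map a VF's BAR0 into the calling process.  The VF index is carried in
 * the mmap offset (vma->vm_pgoff) and the request must cover BAR0 exactly.
 * A userspace sketch (hypothetical fd and length, not part of this file):
 *
 *	bar = mmap(NULL, bar_len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   uverbs_cmd_fd, (off_t)vfid * PAGE_SIZE);
 *
 * Only VFs backing a QP group in this ucontext may be mapped.
 */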
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);
			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

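/*
 * The send/receive/poll data path is implemented by the userspace
 * provider directly against the mapped vNIC queues, so the verbs below
 * exist only to fill the ib_device callback table and fail if ever
 * reached.
 */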
/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */