/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
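
/* Return the device attributes cached at driver init; no
 * driver-specific udata is defined, so any in/out payload is rejected.
 */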
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}
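
/* Map an ethtool link speed (in Mb/s) onto the closest IB speed/width
 * pair so the Ethernet link can be described in ib_port_attr terms.
 */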
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}
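
/* An -EAGAIN from the GID cache means the entry is empty rather than
 * an error, so report the zero GID in that case.
 */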
static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	/* valid indices are 0..RXE_PORT_GID_TBL_LEN - 1, matching the
	 * bound enforced by rxe_add_gid() and rxe_del_gid()
	 */
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe->ifc_ops->link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}
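
/* Build an rxe address vector from ib_ah_attr: look up the source GID
 * for the given sgid_index, then fill in the L2/L3 addressing info.
 */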
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}
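
/* Copy one receive work request into the next free slot of a receive
 * queue. The caller must hold the queue's producer lock.
 */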
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);

	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}
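
/* Post a chain of receive WRs to a shared receive queue. On the first
 * failure, stop and hand the offending WR back through *bad_wr.
 */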
static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			/* drop the reference taken by rxe_alloc() so the
			 * QP is not leaked on this error path
			 */
			rxe_drop_ref(qp);
			err = -EINVAL;
			goto err1;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err2;

	return &qp->ibqp;

err2:
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}
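
/* Check a send WR against the queue pair's limits: SGE count, atomic
 * operand alignment and size, and inline data length.
 */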
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}
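
/* Translate an ib_send_wr into the rxe_send_wr kept in the send WQE.
 * UD-style QPs carry the destination QPN/Q_Key; connected QPs carry
 * the RDMA, atomic, or memory-registration payload for the opcode.
 */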
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - the rdma fields below are shared */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}
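
/* Fill a complete send WQE: copy inline payloads (or reference the
 * SGEs), record the DMA state, and stamp the WQE with the next send
 * sequence number.
 */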
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					   (uintptr_t)sge->addr, sge->length))
				return -EFAULT;
			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		atomic_wr(ibwr)->remote_addr :
		rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
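
/* Validate one send WR and, under the send-queue lock, build its WQE
 * in the next free queue slot before publishing it to the requester.
 */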
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}
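
/* Walk a chain of send WRs, posting each in turn, then kick the
 * requester task. *bad_wr points at the first WR that failed.
 */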
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the case of a GSI QP: ib_send_mad() holds an
	 * irq lock, and the requester calls ip_local_out_sk(), which
	 * takes spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}
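
/* Post a chain of receive WRs directly to the QP's receive queue.
 * This is not legal on a QP that is attached to an SRQ.
 */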
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	return 0;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}
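
/* Callback for ib_sg_to_pages(): record one page address in the next
 * free slot of the MR's physical buffer map.
 */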
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}
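
/* Load an MR's buffer map from a scatterlist and cache the iova,
 * length, and page geometry used later for address translation.
 */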
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);
	char *name;

	name = rxe->ifc_ops->parent_name(rxe, 1);
	return snprintf(buf, 16, "%s\n", name);
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};
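
/* Fill in the ib_device with rxe's attributes, verbs entry points, and
 * supported user verbs commands, then register it with the IB core and
 * create the sysfs attribute files.
 */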
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}