rxe_verbs.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}
static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);

	dev_put(sgid_attr.ndev);
	return 0;
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule for a GSI QP because ib_send_mad() holds an irq lock
	 * and the requester calls ip_local_out_sk(), which takes a
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}
static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->get_netdev = rxe_get_netdev;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}