rxe_verbs.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}
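
/* Map an Ethernet link speed (in Mb/s, as reported via ethtool) onto the
 * nearest IB width/speed pair used in ib_port_attr; e.g. a 10000 Mb/s link
 * is reported as 1X/FDR10 and anything above 40000 Mb/s as 4X/EDR.
 */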
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe->ifc_ops->link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}
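
/* Copy one ib_recv_wr into the next free slot of a receive queue. The queue
 * is a producer/consumer ring: the WQE fields are written first, then
 * smp_wmb() orders those stores before advance_producer() makes the slot
 * visible to the consumer side.
 */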
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - also copy the RDMA fields below */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (__user void *)
					    (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		atomic_wr(ibwr)->remote_addr :
		rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the GSI QP case because ib_send_mad() holds an
	 * irq lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}
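
/* Page-population callback used by rxe_map_mr_sg() below: ib_sg_to_pages()
 * invokes it once per page, and each call records the page address and size
 * in the MR's map/buf tables until num_buf entries have been filled.
 */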
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);
	char *name;

	name = rxe->ifc_ops->parent_name(rxe, 1);
	return snprintf(buf, 16, "%s\n", name);
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};
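
/* Fill in the ib_device fields and the verbs entry points defined above,
 * register the device with the IB core, and create the 'parent' sysfs
 * attribute that reports the underlying network device.
 */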
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}