/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}
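
/*
 * Map a reported Ethernet link speed (in Mb/s) to the closest IB
 * speed/width pair advertised through ib_port_attr.
 */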
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}
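
/*
 * Copy one ib_recv_wr into the next free slot of a receive queue.
 * Callers are expected to hold the queue's producer lock.
 */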
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}
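
/*
 * Sanity-check a send work request against the queue pair's limits
 * before it is copied into the send queue.
 */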
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - also needs remote_addr and rkey */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}
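
/*
 * Build a send WQE from an ib_send_wr: copy the address vector for
 * datagram QPs, then either inline the payload or copy the SGE list,
 * and fill in the DMA state consumed by the requester.
 */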
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (__user void *)
					    (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
			atomic_wr(ibwr)->remote_addr :
			rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
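
/*
 * Validate one send work request and copy it into the next free send
 * queue slot under the SQ lock.
 */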
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the GSI QP case because ib_send_mad() holds an
	 * irq lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}
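
/*
 * For user QPs the WQEs are already written into the shared queue by
 * user space, so posting only kicks the requester task; kernel QPs go
 * through rxe_post_send_kernel() to copy the work requests first.
 */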
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}
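
/*
 * rxe_set_page() is the per-page callback passed to ib_sg_to_pages()
 * by rxe_map_mr_sg(); it records each page address in the MR's map
 * table.
 */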
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};
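
/*
 * Fill in the ib_device fields and verb entry points, then register
 * the device with the RDMA core and create its sysfs attributes.
 */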
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe_node_guid(rxe);
	dev->dev.dma_ops = &dma_virt_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}