rxe_verbs.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}
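
/*
 * Report the port attributes cached in rxe->port and derive the active
 * IB speed/width from the underlying Ethernet device, preferring the
 * ethtool link_ksettings interface over the legacy get_settings call.
 */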
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}
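
/*
 * Resolve the source GID referenced by the AH attribute and fill in the
 * rxe address vector with the network-layer addressing used when
 * building packets for this destination.
 */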
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}
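
/*
 * Copy one ib_recv_wr into the next free slot of a receive queue.
 * Called with the queue's producer lock held; fails with -ENOMEM when
 * the queue is full and -EINVAL when the SGE count exceeds the queue's
 * limit.
 */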
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}
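
/*
 * Translate the generic ib_send_wr into the rxe_send_wr layout stored in
 * the send WQE, copying the opcode-specific fields (UD, RDMA, atomic,
 * invalidate and MR registration variants).
 */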
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through - also needs the rdma fields below */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}
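
/*
 * Build a complete send WQE: translate the WR header, copy inline data
 * (from user or kernel memory depending on qp->is_user) or the SGE list,
 * and initialise the DMA state consumed by the requester.
 */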
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					(uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		atomic_wr(ibwr)->remote_addr :
		rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
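
/*
 * Validate one send WR and, under the send queue lock, write it into the
 * next free send WQE before publishing it to the requester with a write
 * barrier and a producer-index advance.
 */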
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}
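
/*
 * Post a chain of send WRs for a kernel QP, stopping at the first bad WR,
 * then kick the requester task. The task is scheduled rather than run
 * inline when the caller may hold locks (the GSI MAD path) or when
 * several WQEs are already queued.
 */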
static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of GSI QP because ib_send_mad() holds an irq
	 * lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}
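
/*
 * ib_post_send entry point. For user QPs the WQEs are already in the
 * shared queue written by userspace, so this only kicks the requester
 * task; kernel QPs post the WR chain here.
 */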
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}
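
/*
 * Drain up to num_entries completions from the CQ ring into the caller's
 * ib_wc array under the CQ lock; returns the number of entries copied.
 */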
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}
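
/*
 * Arm the CQ for the requested notification type without downgrading an
 * already pending "next completion" request, and report missed events
 * when the caller asks for them.
 */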
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}
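
/*
 * ib_sg_to_pages() callback: record one page address in the MR's map
 * structure. rxe_map_mr_sg() below drives the conversion of a scatterlist
 * into page entries and caches the iova/length/page geometry on the MR.
 */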
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};
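
/*
 * Fill in the ib_device fields and verb callbacks, allocate the CRC32
 * transform used for ICRC generation, register the device with the IB
 * core and create the sysfs attributes, unwinding on any failure.
 */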
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(rxe->tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(rxe->tfm));
		return PTR_ERR(rxe->tfm);
	}

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}