verbs.c

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
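
/*
 * Note (explanatory comment, not in the original file): the three helpers
 * above convert between the IB_RATE_* enum, the static rate expressed as a
 * multiple of the 2.5 Gb/s base rate, and Mb/s.  SDR through QDR speeds are
 * exact multiples of 2.5 Gb/s and so round-trip through ib_rate_to_mult()/
 * mult_to_ib_rate(); the FDR and EDR entries (14062, 25781, ...) reflect the
 * 14.0625 and 25.78125 Gb/s per-lane signalling rates, which is why their
 * Mb/s values are not round numbers and have no multiplier representation.
 */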

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
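
/*
 * Note (explanatory comment, not in the original file): throughout this
 * file, every object that other objects can reference (PDs, CQs, SRQs,
 * XRCDs, real QPs) carries an atomic usecnt.  Creating a dependent object
 * increments the count on everything it points at; destroying it decrements
 * them.  The dealloc/destroy verbs refuse with -EBUSY while usecnt is
 * nonzero, so teardown must proceed leaf-first (e.g. QPs before their CQs
 * and PD).
 */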

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
			IB_LINK_LAYER_ETHERNET);

	memset(ah_attr, 0, sizeof *ah_attr);
	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		/* The reply goes back the way the request came: the source
		 * GID of the incoming GRH becomes the destination GID of
		 * the AH, and the incoming destination GID (our own) is
		 * looked up in the cache to pick the local sgid_index. */
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */
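
/*
 * Note (explanatory comment, not in the original file): XRC target QPs are
 * shared.  ib_create_qp() creates the real QP plus an initial user handle,
 * and further consumers attach to the same real QP with ib_open_qp().  Each
 * handle is a thin struct ib_qp whose real_qp points at the underlying QP;
 * async events are fanned out to every open handle by
 * __ib_shared_qp_event_handler() below.  Handles are released with
 * ib_close_qp(), and the real QP is torn down once the last reference is
 * dropped via ib_destroy_qp().
 */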

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device  = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
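
/*
 * Note (explanatory comment, not in the original file): the table below
 * encodes the QP state machine.  It is indexed by [current state][next
 * state]; each legal transition has .valid set and lists, per QP type, the
 * attribute mask bits that are required (req_param) and permitted
 * (opt_param) for that transition.  The *_add_eth variants are extra bits
 * that apply only when the port's link layer is Ethernet (RoCE).
 */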
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.req_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_SMAC),
				[IB_QPT_UC]  = (IB_QP_SMAC),
				[IB_QPT_XRC_INI] = (IB_QP_SMAC),
				[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
			.opt_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID),
				[IB_QPT_UC]  = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
						IB_QP_VID |
						IB_QP_ALT_VID)
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
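
/*
 * Illustrative sketch (not from the original file): a driver would
 * typically validate the caller's attribute mask against the state table
 * before touching hardware, along the lines of:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask,
 *				rdma_port_get_link_layer(ibqp->device, port)))
 *		return -EINVAL;
 *
 * A nonzero return means the transition is legal, all required bits are
 * present, and no bits fall outside the required/optional sets.
 */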

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	ret = ib_check_mr_access(mr_access_flags);
	if (ret)
		return ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
  1058. /* "Fast" memory regions */
  1059. struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
  1060. int mr_access_flags,
  1061. struct ib_fmr_attr *fmr_attr)
  1062. {
  1063. struct ib_fmr *fmr;
  1064. if (!pd->device->alloc_fmr)
  1065. return ERR_PTR(-ENOSYS);
  1066. fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
  1067. if (!IS_ERR(fmr)) {
  1068. fmr->device = pd->device;
  1069. fmr->pd = pd;
  1070. atomic_inc(&pd->usecnt);
  1071. }
  1072. return fmr;
  1073. }
  1074. EXPORT_SYMBOL(ib_alloc_fmr);
  1075. int ib_unmap_fmr(struct list_head *fmr_list)
  1076. {
  1077. struct ib_fmr *fmr;
  1078. if (list_empty(fmr_list))
  1079. return 0;
  1080. fmr = list_entry(fmr_list->next, struct ib_fmr, list);
  1081. return fmr->device->unmap_fmr(fmr_list);
  1082. }
  1083. EXPORT_SYMBOL(ib_unmap_fmr);
  1084. int ib_dealloc_fmr(struct ib_fmr *fmr)
  1085. {
  1086. struct ib_pd *pd;
  1087. int ret;
  1088. pd = fmr->pd;
  1089. ret = fmr->device->dealloc_fmr(fmr);
  1090. if (!ret)
  1091. atomic_dec(&pd->usecnt);
  1092. return ret;
  1093. }
  1094. EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);