verbs.c

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
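
/*
 * Usage sketch (editorial addition, not part of the original file): a
 * consumer polling a CQ might log failed completions through
 * ib_wc_status_msg(); ib_poll_cq() here is assumed from <rdma/ib_verbs.h>:
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("completion failed: %s\n",
 *			       ib_wc_status_msg(wc.status));
 *	}
 */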

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
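
/*
 * Usage sketch (editorial addition): ib_rate_to_mult() and mult_to_ib_rate()
 * are inverses over the rates both handle, so a rate can be round-tripped
 * through its 2.5 Gbps multiplier:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	// 16
 *	enum ib_rate rate = mult_to_ib_rate(mult);	// IB_RATE_40_GBPS
 *
 * The extended rates known only to ib_rate_to_mbps() (14, 25, 56 Gbps, ...)
 * have no whole multiplier and return -1 from ib_rate_to_mult().
 */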

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
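
/*
 * Usage sketch (editorial addition): a kernel consumer typically allocates
 * one PD per device at setup time and releases it last, once every
 * dependent object is gone:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create QPs, CQs, MRs, and AHs against pd ...
 *	ib_dealloc_pd(pd);	// only after all of them are destroyed
 */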

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist. The caller is responsible for synchronously destroying them and
 * guaranteeing that no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP; no driver should return
	 * an error here.
	 */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}

static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		if (!resolved_dev) {
			/* The interface may have gone away after resolution;
			 * bail out rather than dereference a NULL pointer.
			 */
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			ret = ib_find_cached_gid_by_port(device, &dgid,
							 IB_GID_TYPE_IB,
							 port_num, NULL,
							 &gid_index);
			if (ret)
				return ret;
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
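
/*
 * Usage sketch (editorial addition): a UD service can answer a sender by
 * building an AH directly from the work completion and GRH of the received
 * datagram:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// ... post sends addressed via ah, then ib_destroy_ah(ah) ...
 */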

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq	  = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device  = device;
		qp->real_qp = qp;
		qp->uobject = NULL;
		qp->qp_type = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
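
/*
 * Usage sketch (editorial addition): a typical RC QP is created by filling
 * struct ib_qp_init_attr; the capacity numbers below are arbitrary example
 * values:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	  = cq,
 *		.recv_cq	  = cq,
 *		.cap.max_send_wr  = 16,
 *		.cap.max_recv_wr  = 16,
 *		.cap.max_send_sge = 1,
 *		.cap.max_recv_sge = 1,
 *		.qp_type	  = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */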

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
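
/*
 * Usage sketch (editorial addition): validating a RESET -> INIT transition
 * for an RC QP against qp_state_table above; the mask must carry at least
 * the req_param bits for the QP type:
 *
 *	int ok = ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				    IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				    IB_QP_PORT | IB_QP_ACCESS_FLAGS,
 *				    IB_LINK_LAYER_INFINIBAND);
 *	// ok == 1; dropping IB_QP_PORT from the mask would make it 0
 */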

int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int ret = 0;

	if (*qp_attr_mask & IB_QP_AV) {
		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
			return -EINVAL;

		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
			return 0;

		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
					qp_attr->ah_attr.dmac);
		} else {
			union ib_gid		sgid;
			struct ib_gid_attr	sgid_attr;
			int			ifindex;
			int			hop_limit;

			ret = ib_query_gid(qp->device,
					   qp_attr->ah_attr.port_num,
					   qp_attr->ah_attr.grh.sgid_index,
					   &sgid, &sgid_attr);

			if (ret || !sgid_attr.ndev) {
				if (!ret)
					ret = -ENXIO;
				goto out;
			}

			ifindex = sgid_attr.ndev->ifindex;

			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
							   &qp_attr->ah_attr.grh.dgid,
							   qp_attr->ah_attr.dmac,
							   NULL, &ifindex, &hop_limit);

			dev_put(sgid_attr.ndev);

			qp_attr->ah_attr.grh.hop_limit = hop_limit;
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device	  = device;
		cq->uobject	  = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context	  = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
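
/*
 * Usage sketch (editorial addition): the attribute values are arbitrary
 * examples; comp_handler and cq_context may be NULL when the consumer only
 * polls the CQ:
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 64 };
 *	struct ib_cq *cq = ib_create_cq(device, NULL, NULL, NULL, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */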

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd	    = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:         protection domain associated with the region
 * @mr_type:    memory region type
 * @max_num_sg: maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd	    = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd	    = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr:        memory region
 * @sg:        dma mapped scatterlist
 * @sg_nents:  number of entries in sg
 * @page_size: page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must be aligned to page_size (or physically
 *   contiguous to the previous element). In case an sg element has a
 *   non contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of these
 *   constraints hold and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr,
		 struct scatterlist *sg,
		 int sg_nents,
		 unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents);
}
EXPORT_SYMBOL(ib_map_mr_sg);
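
/*
 * Usage sketch (editorial addition): mapping a DMA-mapped scatterlist into
 * an MR; sg and sg_nents are assumed to come from a prior dma_map_sg() call,
 * 32 is an arbitrary max_num_sg, and handle_partial_mapping is a
 * hypothetical error path:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	int n;
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
 *	if (n < sg_nents)
 *		goto handle_partial_mapping;	// only a prefix was mapped
 *
 * The mapped MR is then made usable by posting an IB_WR_REG_MR work request.
 */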

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:       memory region
 * @sgl:      dma mapped scatterlist
 * @sg_nents: number of entries in sgl
 * @set_page: driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr,
		   struct scatterlist *sgl,
		   int sg_nents,
		   int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	mr->iova = sg_dma_address(&sgl[0]);
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg);
		unsigned int dma_len = sg_dma_len(sg);
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0))
				return i ? : ret;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;
	}

	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);

struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp: queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that. Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp: queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that. Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp: queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
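
/*
 * Usage sketch (editorial addition): a typical teardown path drains the QP
 * before destroying it so that every outstanding CQE is reaped first:
 *
 *	ib_drain_qp(qp);	// moves the QP to error and waits for drain CQEs
 *	ib_destroy_qp(qp);
 */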