cma.c

  1. /*
  2. * Copyright (c) 2005 Voltaire Inc. All rights reserved.
  3. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
  4. * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
  5. * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * OpenIB.org BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer.
  20. *
  21. * - Redistributions in binary form must reproduce the above
  22. * copyright notice, this list of conditions and the following
  23. * disclaimer in the documentation and/or other materials
  24. * provided with the distribution.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33. * SOFTWARE.
  34. */
  35. #include <linux/completion.h>
  36. #include <linux/in.h>
  37. #include <linux/in6.h>
  38. #include <linux/mutex.h>
  39. #include <linux/random.h>
  40. #include <linux/idr.h>
  41. #include <linux/inetdevice.h>
  42. #include <linux/slab.h>
  43. #include <linux/module.h>
  44. #include <net/route.h>
  45. #include <net/tcp.h>
  46. #include <net/ipv6.h>
  47. #include <rdma/rdma_cm.h>
  48. #include <rdma/rdma_cm_ib.h>
  49. #include <rdma/rdma_netlink.h>
  50. #include <rdma/ib.h>
  51. #include <rdma/ib_cache.h>
  52. #include <rdma/ib_cm.h>
  53. #include <rdma/ib_sa.h>
  54. #include <rdma/iw_cm.h>
  55. MODULE_AUTHOR("Sean Hefty");
  56. MODULE_DESCRIPTION("Generic RDMA CM Agent");
  57. MODULE_LICENSE("Dual BSD/GPL");
  58. #define CMA_CM_RESPONSE_TIMEOUT 20
  59. #define CMA_MAX_CM_RETRIES 15
  60. #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
  61. #define CMA_IBOE_PACKET_LIFETIME 18
  62. static void cma_add_one(struct ib_device *device);
  63. static void cma_remove_one(struct ib_device *device);
  64. static struct ib_client cma_client = {
  65. .name = "cma",
  66. .add = cma_add_one,
  67. .remove = cma_remove_one
  68. };
  69. static struct ib_sa_client sa_client;
  70. static struct rdma_addr_client addr_client;
  71. static LIST_HEAD(dev_list);
  72. static LIST_HEAD(listen_any_list);
  73. static DEFINE_MUTEX(lock);
  74. static struct workqueue_struct *cma_wq;
  75. static DEFINE_IDR(tcp_ps);
  76. static DEFINE_IDR(udp_ps);
  77. static DEFINE_IDR(ipoib_ps);
  78. static DEFINE_IDR(ib_ps);
  79. struct cma_device {
  80. struct list_head list;
  81. struct ib_device *device;
  82. struct completion comp;
  83. atomic_t refcount;
  84. struct list_head id_list;
  85. };
  86. struct rdma_bind_list {
  87. struct idr *ps;
  88. struct hlist_head owners;
  89. unsigned short port;
  90. };
  91. enum {
  92. CMA_OPTION_AFONLY,
  93. };
  94. /*
  95. * Device removal can occur at anytime, so we need extra handling to
  96. * serialize notifying the user of device removal with other callbacks.
  97. * We do this by disabling removal notification while a callback is in process,
  98. * and reporting it after the callback completes.
  99. */
  100. struct rdma_id_private {
  101. struct rdma_cm_id id;
  102. struct rdma_bind_list *bind_list;
  103. struct hlist_node node;
  104. struct list_head list; /* listen_any_list or cma_device.list */
  105. struct list_head listen_list; /* per device listens */
  106. struct cma_device *cma_dev;
  107. struct list_head mc_list;
  108. int internal_id;
  109. enum rdma_cm_state state;
  110. spinlock_t lock;
  111. struct mutex qp_mutex;
  112. struct completion comp;
  113. atomic_t refcount;
  114. struct mutex handler_mutex;
  115. int backlog;
  116. int timeout_ms;
  117. struct ib_sa_query *query;
  118. int query_id;
  119. union {
  120. struct ib_cm_id *ib;
  121. struct iw_cm_id *iw;
  122. } cm_id;
  123. u32 seq_num;
  124. u32 qkey;
  125. u32 qp_num;
  126. pid_t owner;
  127. u32 options;
  128. u8 srq;
  129. u8 tos;
  130. u8 reuseaddr;
  131. u8 afonly;
  132. };
  133. struct cma_multicast {
  134. struct rdma_id_private *id_priv;
  135. union {
  136. struct ib_sa_multicast *ib;
  137. } multicast;
  138. struct list_head list;
  139. void *context;
  140. struct sockaddr_storage addr;
  141. struct kref mcref;
  142. };
  143. struct cma_work {
  144. struct work_struct work;
  145. struct rdma_id_private *id;
  146. enum rdma_cm_state old_state;
  147. enum rdma_cm_state new_state;
  148. struct rdma_cm_event event;
  149. };
  150. struct cma_ndev_work {
  151. struct work_struct work;
  152. struct rdma_id_private *id;
  153. struct rdma_cm_event event;
  154. };
  155. struct iboe_mcast_work {
  156. struct work_struct work;
  157. struct rdma_id_private *id;
  158. struct cma_multicast *mc;
  159. };
  160. union cma_ip_addr {
  161. struct in6_addr ip6;
  162. struct {
  163. __be32 pad[3];
  164. __be32 addr;
  165. } ip4;
  166. };
  167. struct cma_hdr {
  168. u8 cma_version;
  169. u8 ip_version; /* IP version: 7:4 */
  170. __be16 port;
  171. union cma_ip_addr src_addr;
  172. union cma_ip_addr dst_addr;
  173. };
  174. #define CMA_VERSION 0x00
  175. static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
  176. {
  177. unsigned long flags;
  178. int ret;
  179. spin_lock_irqsave(&id_priv->lock, flags);
  180. ret = (id_priv->state == comp);
  181. spin_unlock_irqrestore(&id_priv->lock, flags);
  182. return ret;
  183. }
  184. static int cma_comp_exch(struct rdma_id_private *id_priv,
  185. enum rdma_cm_state comp, enum rdma_cm_state exch)
  186. {
  187. unsigned long flags;
  188. int ret;
  189. spin_lock_irqsave(&id_priv->lock, flags);
  190. if ((ret = (id_priv->state == comp)))
  191. id_priv->state = exch;
  192. spin_unlock_irqrestore(&id_priv->lock, flags);
  193. return ret;
  194. }
  195. static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
  196. enum rdma_cm_state exch)
  197. {
  198. unsigned long flags;
  199. enum rdma_cm_state old;
  200. spin_lock_irqsave(&id_priv->lock, flags);
  201. old = id_priv->state;
  202. id_priv->state = exch;
  203. spin_unlock_irqrestore(&id_priv->lock, flags);
  204. return old;
  205. }
  206. static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
  207. {
  208. return hdr->ip_version >> 4;
  209. }
  210. static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
  211. {
  212. hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
  213. }
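/*
 * Illustrative note (not in the original source): the IP version lives in the
 * high nibble of cma_hdr.ip_version, so cma_set_ip_ver(hdr, 4) on a zeroed
 * header leaves ip_version == 0x40 and cma_get_ip_ver() returns 4; for IPv6
 * the stored byte is 0x60.
 */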
  214. static void cma_attach_to_dev(struct rdma_id_private *id_priv,
  215. struct cma_device *cma_dev)
  216. {
  217. atomic_inc(&cma_dev->refcount);
  218. id_priv->cma_dev = cma_dev;
  219. id_priv->id.device = cma_dev->device;
  220. id_priv->id.route.addr.dev_addr.transport =
  221. rdma_node_get_transport(cma_dev->device->node_type);
  222. list_add_tail(&id_priv->list, &cma_dev->id_list);
  223. }
  224. static inline void cma_deref_dev(struct cma_device *cma_dev)
  225. {
  226. if (atomic_dec_and_test(&cma_dev->refcount))
  227. complete(&cma_dev->comp);
  228. }
  229. static inline void release_mc(struct kref *kref)
  230. {
  231. struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
  232. kfree(mc->multicast.ib);
  233. kfree(mc);
  234. }
  235. static void cma_release_dev(struct rdma_id_private *id_priv)
  236. {
  237. mutex_lock(&lock);
  238. list_del(&id_priv->list);
  239. cma_deref_dev(id_priv->cma_dev);
  240. id_priv->cma_dev = NULL;
  241. mutex_unlock(&lock);
  242. }
  243. static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
  244. {
  245. return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
  246. }
  247. static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
  248. {
  249. return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
  250. }
  251. static inline unsigned short cma_family(struct rdma_id_private *id_priv)
  252. {
  253. return id_priv->id.route.addr.src_addr.ss_family;
  254. }
  255. static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
  256. {
  257. struct ib_sa_mcmember_rec rec;
  258. int ret = 0;
  259. if (id_priv->qkey) {
  260. if (qkey && id_priv->qkey != qkey)
  261. return -EINVAL;
  262. return 0;
  263. }
  264. if (qkey) {
  265. id_priv->qkey = qkey;
  266. return 0;
  267. }
  268. switch (id_priv->id.ps) {
  269. case RDMA_PS_UDP:
  270. case RDMA_PS_IB:
  271. id_priv->qkey = RDMA_UDP_QKEY;
  272. break;
  273. case RDMA_PS_IPOIB:
  274. ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
  275. ret = ib_sa_get_mcmember_rec(id_priv->id.device,
  276. id_priv->id.port_num, &rec.mgid,
  277. &rec);
  278. if (!ret)
  279. id_priv->qkey = be32_to_cpu(rec.qkey);
  280. break;
  281. default:
  282. break;
  283. }
  284. return ret;
  285. }
  286. static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
  287. {
  288. dev_addr->dev_type = ARPHRD_INFINIBAND;
  289. rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
  290. ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
  291. }
  292. static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
  293. {
  294. int ret;
  295. if (addr->sa_family != AF_IB) {
  296. ret = rdma_translate_ip(addr, dev_addr, NULL);
  297. } else {
  298. cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
  299. ret = 0;
  300. }
  301. return ret;
  302. }
  303. static int cma_acquire_dev(struct rdma_id_private *id_priv,
  304. struct rdma_id_private *listen_id_priv)
  305. {
  306. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  307. struct cma_device *cma_dev;
  308. union ib_gid gid, iboe_gid;
  309. int ret = -ENODEV;
  310. u8 port, found_port;
  311. enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
  312. IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
  313. if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
  314. id_priv->id.ps == RDMA_PS_IPOIB)
  315. return -EINVAL;
  316. mutex_lock(&lock);
  317. rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
  318. &iboe_gid);
  319. memcpy(&gid, dev_addr->src_dev_addr +
  320. rdma_addr_gid_offset(dev_addr), sizeof gid);
  321. if (listen_id_priv &&
  322. rdma_port_get_link_layer(listen_id_priv->id.device,
  323. listen_id_priv->id.port_num) == dev_ll) {
  324. cma_dev = listen_id_priv->cma_dev;
  325. port = listen_id_priv->id.port_num;
  326. if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
  327. rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
  328. ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
  329. &found_port, NULL);
  330. else
  331. ret = ib_find_cached_gid(cma_dev->device, &gid,
  332. &found_port, NULL);
  333. if (!ret && (port == found_port)) {
  334. id_priv->id.port_num = found_port;
  335. goto out;
  336. }
  337. }
  338. list_for_each_entry(cma_dev, &dev_list, list) {
  339. for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
  340. if (listen_id_priv &&
  341. listen_id_priv->cma_dev == cma_dev &&
  342. listen_id_priv->id.port_num == port)
  343. continue;
  344. if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
  345. if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
  346. rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
  347. ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
  348. else
  349. ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
  350. if (!ret && (port == found_port)) {
  351. id_priv->id.port_num = found_port;
  352. goto out;
  353. }
  354. }
  355. }
  356. }
  357. out:
  358. if (!ret)
  359. cma_attach_to_dev(id_priv, cma_dev);
  360. mutex_unlock(&lock);
  361. return ret;
  362. }
  363. /*
  364. * Select the source IB device and address to reach the destination IB address.
  365. */
  366. static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
  367. {
  368. struct cma_device *cma_dev, *cur_dev;
  369. struct sockaddr_ib *addr;
  370. union ib_gid gid, sgid, *dgid;
  371. u16 pkey, index;
  372. u8 p;
  373. int i;
  374. cma_dev = NULL;
  375. addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
  376. dgid = (union ib_gid *) &addr->sib_addr;
  377. pkey = ntohs(addr->sib_pkey);
  378. list_for_each_entry(cur_dev, &dev_list, list) {
  379. if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
  380. continue;
  381. for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
  382. if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
  383. continue;
  384. for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
  385. if (!memcmp(&gid, dgid, sizeof(gid))) {
  386. cma_dev = cur_dev;
  387. sgid = gid;
  388. id_priv->id.port_num = p;
  389. goto found;
  390. }
  391. if (!cma_dev && (gid.global.subnet_prefix ==
  392. dgid->global.subnet_prefix)) {
  393. cma_dev = cur_dev;
  394. sgid = gid;
  395. id_priv->id.port_num = p;
  396. }
  397. }
  398. }
  399. }
  400. if (!cma_dev)
  401. return -ENODEV;
  402. found:
  403. cma_attach_to_dev(id_priv, cma_dev);
  404. addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
  405. memcpy(&addr->sib_addr, &sgid, sizeof sgid);
  406. cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
  407. return 0;
  408. }
  409. static void cma_deref_id(struct rdma_id_private *id_priv)
  410. {
  411. if (atomic_dec_and_test(&id_priv->refcount))
  412. complete(&id_priv->comp);
  413. }
  414. static int cma_disable_callback(struct rdma_id_private *id_priv,
  415. enum rdma_cm_state state)
  416. {
  417. mutex_lock(&id_priv->handler_mutex);
  418. if (id_priv->state != state) {
  419. mutex_unlock(&id_priv->handler_mutex);
  420. return -EINVAL;
  421. }
  422. return 0;
  423. }
  424. struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
  425. void *context, enum rdma_port_space ps,
  426. enum ib_qp_type qp_type)
  427. {
  428. struct rdma_id_private *id_priv;
  429. id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
  430. if (!id_priv)
  431. return ERR_PTR(-ENOMEM);
  432. id_priv->owner = task_pid_nr(current);
  433. id_priv->state = RDMA_CM_IDLE;
  434. id_priv->id.context = context;
  435. id_priv->id.event_handler = event_handler;
  436. id_priv->id.ps = ps;
  437. id_priv->id.qp_type = qp_type;
  438. spin_lock_init(&id_priv->lock);
  439. mutex_init(&id_priv->qp_mutex);
  440. init_completion(&id_priv->comp);
  441. atomic_set(&id_priv->refcount, 1);
  442. mutex_init(&id_priv->handler_mutex);
  443. INIT_LIST_HEAD(&id_priv->listen_list);
  444. INIT_LIST_HEAD(&id_priv->mc_list);
  445. get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
  446. return &id_priv->id;
  447. }
  448. EXPORT_SYMBOL(rdma_create_id);
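/*
 * Usage sketch (illustrative only, not part of this file): a kernel consumer
 * typically allocates an id with rdma_create_id() and then drives it through
 * the resolve/connect calls from its event handler.  The names my_cm_handler
 * and my_create_id_example are hypothetical.
 */
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	/* Returning non-zero tells the CM to destroy this id (see cma_ib_handler). */
	return 0;
}

static struct rdma_cm_id *my_create_id_example(void *context)
{
	struct rdma_cm_id *id;

	id = rdma_create_id(my_cm_handler, context, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;
	/* ... rdma_resolve_addr(), rdma_resolve_route(), rdma_connect() ... */
	return id;
}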
  449. static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
  450. {
  451. struct ib_qp_attr qp_attr;
  452. int qp_attr_mask, ret;
  453. qp_attr.qp_state = IB_QPS_INIT;
  454. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  455. if (ret)
  456. return ret;
  457. ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
  458. if (ret)
  459. return ret;
  460. qp_attr.qp_state = IB_QPS_RTR;
  461. ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
  462. if (ret)
  463. return ret;
  464. qp_attr.qp_state = IB_QPS_RTS;
  465. qp_attr.sq_psn = 0;
  466. ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
  467. return ret;
  468. }
  469. static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
  470. {
  471. struct ib_qp_attr qp_attr;
  472. int qp_attr_mask, ret;
  473. qp_attr.qp_state = IB_QPS_INIT;
  474. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  475. if (ret)
  476. return ret;
  477. return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
  478. }
  479. int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
  480. struct ib_qp_init_attr *qp_init_attr)
  481. {
  482. struct rdma_id_private *id_priv;
  483. struct ib_qp *qp;
  484. int ret;
  485. id_priv = container_of(id, struct rdma_id_private, id);
  486. if (id->device != pd->device)
  487. return -EINVAL;
  488. qp = ib_create_qp(pd, qp_init_attr);
  489. if (IS_ERR(qp))
  490. return PTR_ERR(qp);
  491. if (id->qp_type == IB_QPT_UD)
  492. ret = cma_init_ud_qp(id_priv, qp);
  493. else
  494. ret = cma_init_conn_qp(id_priv, qp);
  495. if (ret)
  496. goto err;
  497. id->qp = qp;
  498. id_priv->qp_num = qp->qp_num;
  499. id_priv->srq = (qp->srq != NULL);
  500. return 0;
  501. err:
  502. ib_destroy_qp(qp);
  503. return ret;
  504. }
  505. EXPORT_SYMBOL(rdma_create_qp);
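/*
 * Usage sketch (illustrative, not part of the original file): once the route
 * is resolved, a consumer fills struct ib_qp_init_attr and lets
 * rdma_create_qp() create and initialize the QP on id->device.  The names
 * my_setup_qp, pd and cq are hypothetical; pd must belong to the same device
 * as id (see the check in rdma_create_qp() above).
 */
static int my_setup_qp(struct rdma_cm_id *id, struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.cap		= {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.qp_type	= IB_QPT_RC,
	};

	return rdma_create_qp(id, pd, &attr);
}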
  506. void rdma_destroy_qp(struct rdma_cm_id *id)
  507. {
  508. struct rdma_id_private *id_priv;
  509. id_priv = container_of(id, struct rdma_id_private, id);
  510. mutex_lock(&id_priv->qp_mutex);
  511. ib_destroy_qp(id_priv->id.qp);
  512. id_priv->id.qp = NULL;
  513. mutex_unlock(&id_priv->qp_mutex);
  514. }
  515. EXPORT_SYMBOL(rdma_destroy_qp);
  516. static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
  517. struct rdma_conn_param *conn_param)
  518. {
  519. struct ib_qp_attr qp_attr;
  520. int qp_attr_mask, ret;
  521. union ib_gid sgid;
  522. mutex_lock(&id_priv->qp_mutex);
  523. if (!id_priv->id.qp) {
  524. ret = 0;
  525. goto out;
  526. }
  527. /* Need to update QP attributes from default values. */
  528. qp_attr.qp_state = IB_QPS_INIT;
  529. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  530. if (ret)
  531. goto out;
  532. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
  533. if (ret)
  534. goto out;
  535. qp_attr.qp_state = IB_QPS_RTR;
  536. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  537. if (ret)
  538. goto out;
  539. ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
  540. qp_attr.ah_attr.grh.sgid_index, &sgid);
  541. if (ret)
  542. goto out;
  543. if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
  544. == RDMA_TRANSPORT_IB &&
  545. rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
  546. == IB_LINK_LAYER_ETHERNET) {
  547. ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
  548. if (ret)
  549. goto out;
  550. }
  551. if (conn_param)
  552. qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
  553. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
  554. out:
  555. mutex_unlock(&id_priv->qp_mutex);
  556. return ret;
  557. }
  558. static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
  559. struct rdma_conn_param *conn_param)
  560. {
  561. struct ib_qp_attr qp_attr;
  562. int qp_attr_mask, ret;
  563. mutex_lock(&id_priv->qp_mutex);
  564. if (!id_priv->id.qp) {
  565. ret = 0;
  566. goto out;
  567. }
  568. qp_attr.qp_state = IB_QPS_RTS;
  569. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  570. if (ret)
  571. goto out;
  572. if (conn_param)
  573. qp_attr.max_rd_atomic = conn_param->initiator_depth;
  574. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
  575. out:
  576. mutex_unlock(&id_priv->qp_mutex);
  577. return ret;
  578. }
  579. static int cma_modify_qp_err(struct rdma_id_private *id_priv)
  580. {
  581. struct ib_qp_attr qp_attr;
  582. int ret;
  583. mutex_lock(&id_priv->qp_mutex);
  584. if (!id_priv->id.qp) {
  585. ret = 0;
  586. goto out;
  587. }
  588. qp_attr.qp_state = IB_QPS_ERR;
  589. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
  590. out:
  591. mutex_unlock(&id_priv->qp_mutex);
  592. return ret;
  593. }
  594. static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
  595. struct ib_qp_attr *qp_attr, int *qp_attr_mask)
  596. {
  597. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  598. int ret;
  599. u16 pkey;
  600. if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
  601. IB_LINK_LAYER_INFINIBAND)
  602. pkey = ib_addr_get_pkey(dev_addr);
  603. else
  604. pkey = 0xffff;
  605. ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
  606. pkey, &qp_attr->pkey_index);
  607. if (ret)
  608. return ret;
  609. qp_attr->port_num = id_priv->id.port_num;
  610. *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
  611. if (id_priv->id.qp_type == IB_QPT_UD) {
  612. ret = cma_set_qkey(id_priv, 0);
  613. if (ret)
  614. return ret;
  615. qp_attr->qkey = id_priv->qkey;
  616. *qp_attr_mask |= IB_QP_QKEY;
  617. } else {
  618. qp_attr->qp_access_flags = 0;
  619. *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
  620. }
  621. return 0;
  622. }
  623. int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
  624. int *qp_attr_mask)
  625. {
  626. struct rdma_id_private *id_priv;
  627. int ret = 0;
  628. id_priv = container_of(id, struct rdma_id_private, id);
  629. switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
  630. case RDMA_TRANSPORT_IB:
  631. if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
  632. ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
  633. else
  634. ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
  635. qp_attr_mask);
  636. if (qp_attr->qp_state == IB_QPS_RTR)
  637. qp_attr->rq_psn = id_priv->seq_num;
  638. break;
  639. case RDMA_TRANSPORT_IWARP:
  640. if (!id_priv->cm_id.iw) {
  641. qp_attr->qp_access_flags = 0;
  642. *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
  643. } else
  644. ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
  645. qp_attr_mask);
  646. break;
  647. default:
  648. ret = -ENOSYS;
  649. break;
  650. }
  651. return ret;
  652. }
  653. EXPORT_SYMBOL(rdma_init_qp_attr);
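/*
 * Usage sketch (illustrative, not part of the original file): a consumer that
 * creates its QP directly, instead of via rdma_create_qp(), can still let the
 * CM supply the transition attributes.  The caller sets the target qp_state
 * and rdma_init_qp_attr() fills in the rest, exactly as cma_init_ud_qp() does
 * above.  The helper name my_move_qp_to_init is hypothetical.
 */
static int my_move_qp_to_init(struct rdma_cm_id *id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}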
  654. static inline int cma_zero_addr(struct sockaddr *addr)
  655. {
  656. switch (addr->sa_family) {
  657. case AF_INET:
  658. return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
  659. case AF_INET6:
  660. return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
  661. case AF_IB:
  662. return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
  663. default:
  664. return 0;
  665. }
  666. }
  667. static inline int cma_loopback_addr(struct sockaddr *addr)
  668. {
  669. switch (addr->sa_family) {
  670. case AF_INET:
  671. return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
  672. case AF_INET6:
  673. return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
  674. case AF_IB:
  675. return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
  676. default:
  677. return 0;
  678. }
  679. }
  680. static inline int cma_any_addr(struct sockaddr *addr)
  681. {
  682. return cma_zero_addr(addr) || cma_loopback_addr(addr);
  683. }
  684. static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
  685. {
  686. if (src->sa_family != dst->sa_family)
  687. return -1;
  688. switch (src->sa_family) {
  689. case AF_INET:
  690. return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
  691. ((struct sockaddr_in *) dst)->sin_addr.s_addr;
  692. case AF_INET6:
  693. return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
  694. &((struct sockaddr_in6 *) dst)->sin6_addr);
  695. default:
  696. return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
  697. &((struct sockaddr_ib *) dst)->sib_addr);
  698. }
  699. }
  700. static __be16 cma_port(struct sockaddr *addr)
  701. {
  702. struct sockaddr_ib *sib;
  703. switch (addr->sa_family) {
  704. case AF_INET:
  705. return ((struct sockaddr_in *) addr)->sin_port;
  706. case AF_INET6:
  707. return ((struct sockaddr_in6 *) addr)->sin6_port;
  708. case AF_IB:
  709. sib = (struct sockaddr_ib *) addr;
  710. return htons((u16) (be64_to_cpu(sib->sib_sid) &
  711. be64_to_cpu(sib->sib_sid_mask)));
  712. default:
  713. return 0;
  714. }
  715. }
  716. static inline int cma_any_port(struct sockaddr *addr)
  717. {
  718. return !cma_port(addr);
  719. }
  720. static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
  721. struct ib_sa_path_rec *path)
  722. {
  723. struct sockaddr_ib *listen_ib, *ib;
  724. listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
  725. ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
  726. ib->sib_family = listen_ib->sib_family;
  727. ib->sib_pkey = path->pkey;
  728. ib->sib_flowinfo = path->flow_label;
  729. memcpy(&ib->sib_addr, &path->sgid, 16);
  730. ib->sib_sid = listen_ib->sib_sid;
  731. ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
  732. ib->sib_scope_id = listen_ib->sib_scope_id;
  733. ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
  734. ib->sib_family = listen_ib->sib_family;
  735. ib->sib_pkey = path->pkey;
  736. ib->sib_flowinfo = path->flow_label;
  737. memcpy(&ib->sib_addr, &path->dgid, 16);
  738. }
  739. static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
  740. struct cma_hdr *hdr)
  741. {
  742. struct sockaddr_in *listen4, *ip4;
  743. listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
  744. ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
  745. ip4->sin_family = listen4->sin_family;
  746. ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
  747. ip4->sin_port = listen4->sin_port;
  748. ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
  749. ip4->sin_family = listen4->sin_family;
  750. ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
  751. ip4->sin_port = hdr->port;
  752. }
  753. static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
  754. struct cma_hdr *hdr)
  755. {
  756. struct sockaddr_in6 *listen6, *ip6;
  757. listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
  758. ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
  759. ip6->sin6_family = listen6->sin6_family;
  760. ip6->sin6_addr = hdr->dst_addr.ip6;
  761. ip6->sin6_port = listen6->sin6_port;
  762. ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
  763. ip6->sin6_family = listen6->sin6_family;
  764. ip6->sin6_addr = hdr->src_addr.ip6;
  765. ip6->sin6_port = hdr->port;
  766. }
  767. static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
  768. struct ib_cm_event *ib_event)
  769. {
  770. struct cma_hdr *hdr;
  771. if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
  772. (ib_event->event == IB_CM_REQ_RECEIVED)) {
  773. cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
  774. return 0;
  775. }
  776. hdr = ib_event->private_data;
  777. if (hdr->cma_version != CMA_VERSION)
  778. return -EINVAL;
  779. switch (cma_get_ip_ver(hdr)) {
  780. case 4:
  781. cma_save_ip4_info(id, listen_id, hdr);
  782. break;
  783. case 6:
  784. cma_save_ip6_info(id, listen_id, hdr);
  785. break;
  786. default:
  787. return -EINVAL;
  788. }
  789. return 0;
  790. }
  791. static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
  792. {
  793. return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
  794. }
  795. static void cma_cancel_route(struct rdma_id_private *id_priv)
  796. {
  797. switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
  798. case IB_LINK_LAYER_INFINIBAND:
  799. if (id_priv->query)
  800. ib_sa_cancel_query(id_priv->query_id, id_priv->query);
  801. break;
  802. default:
  803. break;
  804. }
  805. }
  806. static void cma_cancel_listens(struct rdma_id_private *id_priv)
  807. {
  808. struct rdma_id_private *dev_id_priv;
  809. /*
  810. * Remove from listen_any_list to prevent added devices from spawning
  811. * additional listen requests.
  812. */
  813. mutex_lock(&lock);
  814. list_del(&id_priv->list);
  815. while (!list_empty(&id_priv->listen_list)) {
  816. dev_id_priv = list_entry(id_priv->listen_list.next,
  817. struct rdma_id_private, listen_list);
  818. /* sync with device removal to avoid duplicate destruction */
  819. list_del_init(&dev_id_priv->list);
  820. list_del(&dev_id_priv->listen_list);
  821. mutex_unlock(&lock);
  822. rdma_destroy_id(&dev_id_priv->id);
  823. mutex_lock(&lock);
  824. }
  825. mutex_unlock(&lock);
  826. }
  827. static void cma_cancel_operation(struct rdma_id_private *id_priv,
  828. enum rdma_cm_state state)
  829. {
  830. switch (state) {
  831. case RDMA_CM_ADDR_QUERY:
  832. rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
  833. break;
  834. case RDMA_CM_ROUTE_QUERY:
  835. cma_cancel_route(id_priv);
  836. break;
  837. case RDMA_CM_LISTEN:
  838. if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
  839. cma_cancel_listens(id_priv);
  840. break;
  841. default:
  842. break;
  843. }
  844. }
  845. static void cma_release_port(struct rdma_id_private *id_priv)
  846. {
  847. struct rdma_bind_list *bind_list = id_priv->bind_list;
  848. if (!bind_list)
  849. return;
  850. mutex_lock(&lock);
  851. hlist_del(&id_priv->node);
  852. if (hlist_empty(&bind_list->owners)) {
  853. idr_remove(bind_list->ps, bind_list->port);
  854. kfree(bind_list);
  855. }
  856. mutex_unlock(&lock);
  857. }
  858. static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
  859. {
  860. struct cma_multicast *mc;
  861. while (!list_empty(&id_priv->mc_list)) {
  862. mc = container_of(id_priv->mc_list.next,
  863. struct cma_multicast, list);
  864. list_del(&mc->list);
  865. switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
  866. case IB_LINK_LAYER_INFINIBAND:
  867. ib_sa_free_multicast(mc->multicast.ib);
  868. kfree(mc);
  869. break;
  870. case IB_LINK_LAYER_ETHERNET:
  871. kref_put(&mc->mcref, release_mc);
  872. break;
  873. default:
  874. break;
  875. }
  876. }
  877. }
  878. void rdma_destroy_id(struct rdma_cm_id *id)
  879. {
  880. struct rdma_id_private *id_priv;
  881. enum rdma_cm_state state;
  882. id_priv = container_of(id, struct rdma_id_private, id);
  883. state = cma_exch(id_priv, RDMA_CM_DESTROYING);
  884. cma_cancel_operation(id_priv, state);
  885. /*
  886. * Wait for any active callback to finish. New callbacks will find
  887. * the id_priv state set to destroying and abort.
  888. */
  889. mutex_lock(&id_priv->handler_mutex);
  890. mutex_unlock(&id_priv->handler_mutex);
  891. if (id_priv->cma_dev) {
  892. switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
  893. case RDMA_TRANSPORT_IB:
  894. if (id_priv->cm_id.ib)
  895. ib_destroy_cm_id(id_priv->cm_id.ib);
  896. break;
  897. case RDMA_TRANSPORT_IWARP:
  898. if (id_priv->cm_id.iw)
  899. iw_destroy_cm_id(id_priv->cm_id.iw);
  900. break;
  901. default:
  902. break;
  903. }
  904. cma_leave_mc_groups(id_priv);
  905. cma_release_dev(id_priv);
  906. }
  907. cma_release_port(id_priv);
  908. cma_deref_id(id_priv);
  909. wait_for_completion(&id_priv->comp);
  910. if (id_priv->internal_id)
  911. cma_deref_id(id_priv->id.context);
  912. kfree(id_priv->id.route.path_rec);
  913. kfree(id_priv);
  914. }
  915. EXPORT_SYMBOL(rdma_destroy_id);
  916. static int cma_rep_recv(struct rdma_id_private *id_priv)
  917. {
  918. int ret;
  919. ret = cma_modify_qp_rtr(id_priv, NULL);
  920. if (ret)
  921. goto reject;
  922. ret = cma_modify_qp_rts(id_priv, NULL);
  923. if (ret)
  924. goto reject;
  925. ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
  926. if (ret)
  927. goto reject;
  928. return 0;
  929. reject:
  930. cma_modify_qp_err(id_priv);
  931. ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
  932. NULL, 0, NULL, 0);
  933. return ret;
  934. }
  935. static void cma_set_rep_event_data(struct rdma_cm_event *event,
  936. struct ib_cm_rep_event_param *rep_data,
  937. void *private_data)
  938. {
  939. event->param.conn.private_data = private_data;
  940. event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
  941. event->param.conn.responder_resources = rep_data->responder_resources;
  942. event->param.conn.initiator_depth = rep_data->initiator_depth;
  943. event->param.conn.flow_control = rep_data->flow_control;
  944. event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
  945. event->param.conn.srq = rep_data->srq;
  946. event->param.conn.qp_num = rep_data->remote_qpn;
  947. }
  948. static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
  949. {
  950. struct rdma_id_private *id_priv = cm_id->context;
  951. struct rdma_cm_event event;
  952. int ret = 0;
  953. if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
  954. cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
  955. (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
  956. cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
  957. return 0;
  958. memset(&event, 0, sizeof event);
  959. switch (ib_event->event) {
  960. case IB_CM_REQ_ERROR:
  961. case IB_CM_REP_ERROR:
  962. event.event = RDMA_CM_EVENT_UNREACHABLE;
  963. event.status = -ETIMEDOUT;
  964. break;
  965. case IB_CM_REP_RECEIVED:
  966. if (id_priv->id.qp) {
  967. event.status = cma_rep_recv(id_priv);
  968. event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
  969. RDMA_CM_EVENT_ESTABLISHED;
  970. } else {
  971. event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
  972. }
  973. cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
  974. ib_event->private_data);
  975. break;
  976. case IB_CM_RTU_RECEIVED:
  977. case IB_CM_USER_ESTABLISHED:
  978. event.event = RDMA_CM_EVENT_ESTABLISHED;
  979. break;
  980. case IB_CM_DREQ_ERROR:
  981. event.status = -ETIMEDOUT; /* fall through */
  982. case IB_CM_DREQ_RECEIVED:
  983. case IB_CM_DREP_RECEIVED:
  984. if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
  985. RDMA_CM_DISCONNECT))
  986. goto out;
  987. event.event = RDMA_CM_EVENT_DISCONNECTED;
  988. break;
  989. case IB_CM_TIMEWAIT_EXIT:
  990. event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
  991. break;
  992. case IB_CM_MRA_RECEIVED:
  993. /* ignore event */
  994. goto out;
  995. case IB_CM_REJ_RECEIVED:
  996. cma_modify_qp_err(id_priv);
  997. event.status = ib_event->param.rej_rcvd.reason;
  998. event.event = RDMA_CM_EVENT_REJECTED;
  999. event.param.conn.private_data = ib_event->private_data;
  1000. event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
  1001. break;
  1002. default:
  1003. printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
  1004. ib_event->event);
  1005. goto out;
  1006. }
  1007. ret = id_priv->id.event_handler(&id_priv->id, &event);
  1008. if (ret) {
  1009. /* Destroy the CM ID by returning a non-zero value. */
  1010. id_priv->cm_id.ib = NULL;
  1011. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1012. mutex_unlock(&id_priv->handler_mutex);
  1013. rdma_destroy_id(&id_priv->id);
  1014. return ret;
  1015. }
  1016. out:
  1017. mutex_unlock(&id_priv->handler_mutex);
  1018. return ret;
  1019. }
  1020. static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
  1021. struct ib_cm_event *ib_event)
  1022. {
  1023. struct rdma_id_private *id_priv;
  1024. struct rdma_cm_id *id;
  1025. struct rdma_route *rt;
  1026. int ret;
  1027. id = rdma_create_id(listen_id->event_handler, listen_id->context,
  1028. listen_id->ps, ib_event->param.req_rcvd.qp_type);
  1029. if (IS_ERR(id))
  1030. return NULL;
  1031. id_priv = container_of(id, struct rdma_id_private, id);
  1032. if (cma_save_net_info(id, listen_id, ib_event))
  1033. goto err;
  1034. rt = &id->route;
  1035. rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
  1036. rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
  1037. GFP_KERNEL);
  1038. if (!rt->path_rec)
  1039. goto err;
  1040. rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
  1041. if (rt->num_paths == 2)
  1042. rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
  1043. if (cma_any_addr(cma_src_addr(id_priv))) {
  1044. rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
  1045. rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
  1046. ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
  1047. } else {
  1048. ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
  1049. if (ret)
  1050. goto err;
  1051. }
  1052. rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
  1053. id_priv->state = RDMA_CM_CONNECT;
  1054. return id_priv;
  1055. err:
  1056. rdma_destroy_id(id);
  1057. return NULL;
  1058. }
  1059. static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
  1060. struct ib_cm_event *ib_event)
  1061. {
  1062. struct rdma_id_private *id_priv;
  1063. struct rdma_cm_id *id;
  1064. int ret;
  1065. id = rdma_create_id(listen_id->event_handler, listen_id->context,
  1066. listen_id->ps, IB_QPT_UD);
  1067. if (IS_ERR(id))
  1068. return NULL;
  1069. id_priv = container_of(id, struct rdma_id_private, id);
  1070. if (cma_save_net_info(id, listen_id, ib_event))
  1071. goto err;
  1072. if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
  1073. ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
  1074. if (ret)
  1075. goto err;
  1076. }
  1077. id_priv->state = RDMA_CM_CONNECT;
  1078. return id_priv;
  1079. err:
  1080. rdma_destroy_id(id);
  1081. return NULL;
  1082. }
  1083. static void cma_set_req_event_data(struct rdma_cm_event *event,
  1084. struct ib_cm_req_event_param *req_data,
  1085. void *private_data, int offset)
  1086. {
  1087. event->param.conn.private_data = private_data + offset;
  1088. event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
  1089. event->param.conn.responder_resources = req_data->responder_resources;
  1090. event->param.conn.initiator_depth = req_data->initiator_depth;
  1091. event->param.conn.flow_control = req_data->flow_control;
  1092. event->param.conn.retry_count = req_data->retry_count;
  1093. event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
  1094. event->param.conn.srq = req_data->srq;
  1095. event->param.conn.qp_num = req_data->remote_qpn;
  1096. }
  1097. static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
  1098. {
  1099. return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
  1100. (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
  1101. ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
  1102. (id->qp_type == IB_QPT_UD)) ||
  1103. (!id->qp_type));
  1104. }
  1105. static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
  1106. {
  1107. struct rdma_id_private *listen_id, *conn_id;
  1108. struct rdma_cm_event event;
  1109. int offset, ret;
  1110. listen_id = cm_id->context;
  1111. if (!cma_check_req_qp_type(&listen_id->id, ib_event))
  1112. return -EINVAL;
  1113. if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
  1114. return -ECONNABORTED;
  1115. memset(&event, 0, sizeof event);
  1116. offset = cma_user_data_offset(listen_id);
  1117. event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
  1118. if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
  1119. conn_id = cma_new_udp_id(&listen_id->id, ib_event);
  1120. event.param.ud.private_data = ib_event->private_data + offset;
  1121. event.param.ud.private_data_len =
  1122. IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
  1123. } else {
  1124. conn_id = cma_new_conn_id(&listen_id->id, ib_event);
  1125. cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
  1126. ib_event->private_data, offset);
  1127. }
  1128. if (!conn_id) {
  1129. ret = -ENOMEM;
  1130. goto err1;
  1131. }
  1132. mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
  1133. ret = cma_acquire_dev(conn_id, listen_id);
  1134. if (ret)
  1135. goto err2;
  1136. conn_id->cm_id.ib = cm_id;
  1137. cm_id->context = conn_id;
  1138. cm_id->cm_handler = cma_ib_handler;
  1139. /*
  1140. * Protect against the user destroying conn_id from another thread
  1141. * until we're done accessing it.
  1142. */
  1143. atomic_inc(&conn_id->refcount);
  1144. ret = conn_id->id.event_handler(&conn_id->id, &event);
  1145. if (ret)
  1146. goto err3;
  1147. /*
  1148. * Acquire mutex to prevent user executing rdma_destroy_id()
  1149. * while we're accessing the cm_id.
  1150. */
  1151. mutex_lock(&lock);
  1152. if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
  1153. (conn_id->id.qp_type != IB_QPT_UD))
  1154. ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
  1155. mutex_unlock(&lock);
  1156. mutex_unlock(&conn_id->handler_mutex);
  1157. mutex_unlock(&listen_id->handler_mutex);
  1158. cma_deref_id(conn_id);
  1159. return 0;
  1160. err3:
  1161. cma_deref_id(conn_id);
  1162. /* Destroy the CM ID by returning a non-zero value. */
  1163. conn_id->cm_id.ib = NULL;
  1164. err2:
  1165. cma_exch(conn_id, RDMA_CM_DESTROYING);
  1166. mutex_unlock(&conn_id->handler_mutex);
  1167. err1:
  1168. mutex_unlock(&listen_id->handler_mutex);
  1169. if (conn_id)
  1170. rdma_destroy_id(&conn_id->id);
  1171. return ret;
  1172. }
  1173. __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
  1174. {
  1175. if (addr->sa_family == AF_IB)
  1176. return ((struct sockaddr_ib *) addr)->sib_sid;
  1177. return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
  1178. }
  1179. EXPORT_SYMBOL(rdma_get_service_id);
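/*
 * Illustrative note (not in the original source): for IP-based addressing the
 * service ID packs the port space into the upper bits and the port number
 * into the low 16 bits, e.g. RDMA_PS_TCP (0x0106) with port 4791 (0x12b7)
 * yields cpu_to_be64(0x010612b7).
 */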
  1180. static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
  1181. struct ib_cm_compare_data *compare)
  1182. {
  1183. struct cma_hdr *cma_data, *cma_mask;
  1184. __be32 ip4_addr;
  1185. struct in6_addr ip6_addr;
  1186. memset(compare, 0, sizeof *compare);
  1187. cma_data = (void *) compare->data;
  1188. cma_mask = (void *) compare->mask;
  1189. switch (addr->sa_family) {
  1190. case AF_INET:
  1191. ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
  1192. cma_set_ip_ver(cma_data, 4);
  1193. cma_set_ip_ver(cma_mask, 0xF);
  1194. if (!cma_any_addr(addr)) {
  1195. cma_data->dst_addr.ip4.addr = ip4_addr;
  1196. cma_mask->dst_addr.ip4.addr = htonl(~0);
  1197. }
  1198. break;
  1199. case AF_INET6:
  1200. ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
  1201. cma_set_ip_ver(cma_data, 6);
  1202. cma_set_ip_ver(cma_mask, 0xF);
  1203. if (!cma_any_addr(addr)) {
  1204. cma_data->dst_addr.ip6 = ip6_addr;
  1205. memset(&cma_mask->dst_addr.ip6, 0xFF,
  1206. sizeof cma_mask->dst_addr.ip6);
  1207. }
  1208. break;
  1209. default:
  1210. break;
  1211. }
  1212. }
  1213. static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
  1214. {
  1215. struct rdma_id_private *id_priv = iw_id->context;
  1216. struct rdma_cm_event event;
  1217. int ret = 0;
  1218. struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
  1219. struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  1220. if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
  1221. return 0;
  1222. memset(&event, 0, sizeof event);
  1223. switch (iw_event->event) {
  1224. case IW_CM_EVENT_CLOSE:
  1225. event.event = RDMA_CM_EVENT_DISCONNECTED;
  1226. break;
  1227. case IW_CM_EVENT_CONNECT_REPLY:
  1228. memcpy(cma_src_addr(id_priv), laddr,
  1229. rdma_addr_size(laddr));
  1230. memcpy(cma_dst_addr(id_priv), raddr,
  1231. rdma_addr_size(raddr));
  1232. switch (iw_event->status) {
  1233. case 0:
  1234. event.event = RDMA_CM_EVENT_ESTABLISHED;
  1235. event.param.conn.initiator_depth = iw_event->ird;
  1236. event.param.conn.responder_resources = iw_event->ord;
  1237. break;
  1238. case -ECONNRESET:
  1239. case -ECONNREFUSED:
  1240. event.event = RDMA_CM_EVENT_REJECTED;
  1241. break;
  1242. case -ETIMEDOUT:
  1243. event.event = RDMA_CM_EVENT_UNREACHABLE;
  1244. break;
  1245. default:
  1246. event.event = RDMA_CM_EVENT_CONNECT_ERROR;
  1247. break;
  1248. }
  1249. break;
  1250. case IW_CM_EVENT_ESTABLISHED:
  1251. event.event = RDMA_CM_EVENT_ESTABLISHED;
  1252. event.param.conn.initiator_depth = iw_event->ird;
  1253. event.param.conn.responder_resources = iw_event->ord;
  1254. break;
  1255. default:
  1256. BUG_ON(1);
  1257. }
  1258. event.status = iw_event->status;
  1259. event.param.conn.private_data = iw_event->private_data;
  1260. event.param.conn.private_data_len = iw_event->private_data_len;
  1261. ret = id_priv->id.event_handler(&id_priv->id, &event);
  1262. if (ret) {
  1263. /* Destroy the CM ID by returning a non-zero value. */
  1264. id_priv->cm_id.iw = NULL;
  1265. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1266. mutex_unlock(&id_priv->handler_mutex);
  1267. rdma_destroy_id(&id_priv->id);
  1268. return ret;
  1269. }
  1270. mutex_unlock(&id_priv->handler_mutex);
  1271. return ret;
  1272. }
  1273. static int iw_conn_req_handler(struct iw_cm_id *cm_id,
  1274. struct iw_cm_event *iw_event)
  1275. {
  1276. struct rdma_cm_id *new_cm_id;
  1277. struct rdma_id_private *listen_id, *conn_id;
  1278. struct rdma_cm_event event;
  1279. int ret;
  1280. struct ib_device_attr attr;
  1281. struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
  1282. struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  1283. listen_id = cm_id->context;
  1284. if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
  1285. return -ECONNABORTED;
  1286. /* Create a new RDMA id for the new IW CM ID */
  1287. new_cm_id = rdma_create_id(listen_id->id.event_handler,
  1288. listen_id->id.context,
  1289. RDMA_PS_TCP, IB_QPT_RC);
  1290. if (IS_ERR(new_cm_id)) {
  1291. ret = -ENOMEM;
  1292. goto out;
  1293. }
  1294. conn_id = container_of(new_cm_id, struct rdma_id_private, id);
  1295. mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
  1296. conn_id->state = RDMA_CM_CONNECT;
  1297. ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
  1298. if (ret) {
  1299. mutex_unlock(&conn_id->handler_mutex);
  1300. rdma_destroy_id(new_cm_id);
  1301. goto out;
  1302. }
  1303. ret = cma_acquire_dev(conn_id, listen_id);
  1304. if (ret) {
  1305. mutex_unlock(&conn_id->handler_mutex);
  1306. rdma_destroy_id(new_cm_id);
  1307. goto out;
  1308. }
  1309. conn_id->cm_id.iw = cm_id;
  1310. cm_id->context = conn_id;
  1311. cm_id->cm_handler = cma_iw_handler;
  1312. memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
  1313. memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
  1314. ret = ib_query_device(conn_id->id.device, &attr);
  1315. if (ret) {
  1316. mutex_unlock(&conn_id->handler_mutex);
  1317. rdma_destroy_id(new_cm_id);
  1318. goto out;
  1319. }
  1320. memset(&event, 0, sizeof event);
  1321. event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
  1322. event.param.conn.private_data = iw_event->private_data;
  1323. event.param.conn.private_data_len = iw_event->private_data_len;
  1324. event.param.conn.initiator_depth = iw_event->ird;
  1325. event.param.conn.responder_resources = iw_event->ord;
  1326. /*
  1327. * Protect against the user destroying conn_id from another thread
  1328. * until we're done accessing it.
  1329. */
  1330. atomic_inc(&conn_id->refcount);
  1331. ret = conn_id->id.event_handler(&conn_id->id, &event);
  1332. if (ret) {
  1333. /* User wants to destroy the CM ID */
  1334. conn_id->cm_id.iw = NULL;
  1335. cma_exch(conn_id, RDMA_CM_DESTROYING);
  1336. mutex_unlock(&conn_id->handler_mutex);
  1337. cma_deref_id(conn_id);
  1338. rdma_destroy_id(&conn_id->id);
  1339. goto out;
  1340. }
  1341. mutex_unlock(&conn_id->handler_mutex);
  1342. cma_deref_id(conn_id);
  1343. out:
  1344. mutex_unlock(&listen_id->handler_mutex);
  1345. return ret;
  1346. }
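/*
 * Start an IB CM listen for this id.  A wildcard bind without the af_only
 * option listens on the service ID alone; otherwise compare data is
 * attached so requests are also demuxed by destination address.
 */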
  1347. static int cma_ib_listen(struct rdma_id_private *id_priv)
  1348. {
  1349. struct ib_cm_compare_data compare_data;
  1350. struct sockaddr *addr;
  1351. struct ib_cm_id *id;
  1352. __be64 svc_id;
  1353. int ret;
  1354. id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
  1355. if (IS_ERR(id))
  1356. return PTR_ERR(id);
  1357. id_priv->cm_id.ib = id;
  1358. addr = cma_src_addr(id_priv);
  1359. svc_id = rdma_get_service_id(&id_priv->id, addr);
  1360. if (cma_any_addr(addr) && !id_priv->afonly)
  1361. ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
  1362. else {
  1363. cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
  1364. ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
  1365. }
  1366. if (ret) {
  1367. ib_destroy_cm_id(id_priv->cm_id.ib);
  1368. id_priv->cm_id.ib = NULL;
  1369. }
  1370. return ret;
  1371. }
  1372. static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
  1373. {
  1374. int ret;
  1375. struct iw_cm_id *id;
  1376. id = iw_create_cm_id(id_priv->id.device,
  1377. iw_conn_req_handler,
  1378. id_priv);
  1379. if (IS_ERR(id))
  1380. return PTR_ERR(id);
  1381. id_priv->cm_id.iw = id;
  1382. memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
  1383. rdma_addr_size(cma_src_addr(id_priv)));
  1384. ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
  1385. if (ret) {
  1386. iw_destroy_cm_id(id_priv->cm_id.iw);
  1387. id_priv->cm_id.iw = NULL;
  1388. }
  1389. return ret;
  1390. }
static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;

        return id_priv->id.event_handler(id, event);
}
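/*
 * Clone an internal, device-specific listener from a wildcard listen.
 * Events on the clone are routed through cma_listen_handler() above, which
 * restores the user's context and handler so the caller only ever sees the
 * original rdma_cm_id.
 */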
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        int ret;

        if (cma_family(id_priv) == AF_IB &&
            rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
                return;

        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
                            id_priv->id.qp_type);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = RDMA_CM_ADDR_BOUND;
        memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
               rdma_addr_size(cma_src_addr(id_priv)));

        cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
        atomic_inc(&id_priv->refcount);
        dev_id_priv->internal_id = 1;
        dev_id_priv->afonly = id_priv->afonly;

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
                       ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
  1442. static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
  1443. void *context)
  1444. {
  1445. struct cma_work *work = context;
  1446. struct rdma_route *route;
  1447. route = &work->id->id.route;
  1448. if (!status) {
  1449. route->num_paths = 1;
  1450. *route->path_rec = *path_rec;
  1451. } else {
  1452. work->old_state = RDMA_CM_ROUTE_QUERY;
  1453. work->new_state = RDMA_CM_ADDR_RESOLVED;
  1454. work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
  1455. work->event.status = status;
  1456. }
  1457. queue_work(cma_wq, &work->work);
  1458. }
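/*
 * Issue the SA path record query for an IB route.  The query always asks
 * for SGID/DGID, pkey, service ID and a single reversible path, and maps
 * the id's TOS into qos_class for IPv4 or into traffic_class (taken from
 * flowinfo bits 20-27) for IPv6/AF_IB, as the switch below shows.
 */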
  1459. static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
  1460. struct cma_work *work)
  1461. {
  1462. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  1463. struct ib_sa_path_rec path_rec;
  1464. ib_sa_comp_mask comp_mask;
  1465. struct sockaddr_in6 *sin6;
  1466. struct sockaddr_ib *sib;
  1467. memset(&path_rec, 0, sizeof path_rec);
  1468. rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
  1469. rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
  1470. path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
  1471. path_rec.numb_path = 1;
  1472. path_rec.reversible = 1;
  1473. path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  1474. comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
  1475. IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
  1476. IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
  1477. switch (cma_family(id_priv)) {
  1478. case AF_INET:
  1479. path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
  1480. comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
  1481. break;
  1482. case AF_INET6:
  1483. sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
  1484. path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
  1485. comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
  1486. break;
  1487. case AF_IB:
  1488. sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
  1489. path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
  1490. comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
  1491. break;
  1492. }
  1493. id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
  1494. id_priv->id.port_num, &path_rec,
  1495. comp_mask, timeout_ms,
  1496. GFP_KERNEL, cma_query_handler,
  1497. work, &id_priv->query);
  1498. return (id_priv->query_id < 0) ? id_priv->query_id : 0;
  1499. }
  1500. static void cma_work_handler(struct work_struct *_work)
  1501. {
  1502. struct cma_work *work = container_of(_work, struct cma_work, work);
  1503. struct rdma_id_private *id_priv = work->id;
  1504. int destroy = 0;
  1505. mutex_lock(&id_priv->handler_mutex);
  1506. if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
  1507. goto out;
  1508. if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
  1509. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1510. destroy = 1;
  1511. }
  1512. out:
  1513. mutex_unlock(&id_priv->handler_mutex);
  1514. cma_deref_id(id_priv);
  1515. if (destroy)
  1516. rdma_destroy_id(&id_priv->id);
  1517. kfree(work);
  1518. }
  1519. static void cma_ndev_work_handler(struct work_struct *_work)
  1520. {
  1521. struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
  1522. struct rdma_id_private *id_priv = work->id;
  1523. int destroy = 0;
  1524. mutex_lock(&id_priv->handler_mutex);
  1525. if (id_priv->state == RDMA_CM_DESTROYING ||
  1526. id_priv->state == RDMA_CM_DEVICE_REMOVAL)
  1527. goto out;
  1528. if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
  1529. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1530. destroy = 1;
  1531. }
  1532. out:
  1533. mutex_unlock(&id_priv->handler_mutex);
  1534. cma_deref_id(id_priv);
  1535. if (destroy)
  1536. rdma_destroy_id(&id_priv->id);
  1537. kfree(work);
  1538. }
  1539. static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
  1540. {
  1541. struct rdma_route *route = &id_priv->id.route;
  1542. struct cma_work *work;
  1543. int ret;
  1544. work = kzalloc(sizeof *work, GFP_KERNEL);
  1545. if (!work)
  1546. return -ENOMEM;
  1547. work->id = id_priv;
  1548. INIT_WORK(&work->work, cma_work_handler);
  1549. work->old_state = RDMA_CM_ROUTE_QUERY;
  1550. work->new_state = RDMA_CM_ROUTE_RESOLVED;
  1551. work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
  1552. route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
  1553. if (!route->path_rec) {
  1554. ret = -ENOMEM;
  1555. goto err1;
  1556. }
  1557. ret = cma_query_ib_route(id_priv, timeout_ms, work);
  1558. if (ret)
  1559. goto err2;
  1560. return 0;
  1561. err2:
  1562. kfree(route->path_rec);
  1563. route->path_rec = NULL;
  1564. err1:
  1565. kfree(work);
  1566. return ret;
  1567. }
  1568. int rdma_set_ib_paths(struct rdma_cm_id *id,
  1569. struct ib_sa_path_rec *path_rec, int num_paths)
  1570. {
  1571. struct rdma_id_private *id_priv;
  1572. int ret;
  1573. id_priv = container_of(id, struct rdma_id_private, id);
  1574. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
  1575. RDMA_CM_ROUTE_RESOLVED))
  1576. return -EINVAL;
  1577. id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
  1578. GFP_KERNEL);
  1579. if (!id->route.path_rec) {
  1580. ret = -ENOMEM;
  1581. goto err;
  1582. }
  1583. id->route.num_paths = num_paths;
  1584. return 0;
  1585. err:
  1586. cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
  1587. return ret;
  1588. }
  1589. EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct cma_work *work;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = RDMA_CM_ROUTE_QUERY;
        work->new_state = RDMA_CM_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
}
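/*
 * Derive an Ethernet (RoCE) service level from the id's TOS: TOS is first
 * mapped to an skb priority, then through the real device's traffic-class
 * map when one is configured, falling back to the VLAN egress QoS map, and
 * finally to SL 0.
 */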
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
        int prio;
        struct net_device *dev;

        prio = rt_tos2priority(tos);
        dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
                vlan_dev_real_dev(ndev) : ndev;

        if (dev->num_tc)
                return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
        if (ndev->priv_flags & IFF_802_1Q_VLAN)
                return (vlan_dev_get_egress_qos_mask(ndev, prio) &
                        VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
        return 0;
}
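/*
 * RoCE route resolution: there is no SA to query, so the single path
 * record is synthesized locally from the bound net_device (MACs, VLAN,
 * MTU, rate) and from the IP addresses converted to GIDs, then the
 * ROUTE_RESOLVED event is completed via the work queue.
 */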
  1620. static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
  1621. {
  1622. struct rdma_route *route = &id_priv->id.route;
  1623. struct rdma_addr *addr = &route->addr;
  1624. struct cma_work *work;
  1625. int ret;
  1626. struct net_device *ndev = NULL;
  1627. work = kzalloc(sizeof *work, GFP_KERNEL);
  1628. if (!work)
  1629. return -ENOMEM;
  1630. work->id = id_priv;
  1631. INIT_WORK(&work->work, cma_work_handler);
  1632. route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
  1633. if (!route->path_rec) {
  1634. ret = -ENOMEM;
  1635. goto err1;
  1636. }
  1637. route->num_paths = 1;
  1638. if (addr->dev_addr.bound_dev_if)
  1639. ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
  1640. if (!ndev) {
  1641. ret = -ENODEV;
  1642. goto err2;
  1643. }
  1644. route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
  1645. memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
  1646. memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
  1647. rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
  1648. &route->path_rec->sgid);
  1649. rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
  1650. &route->path_rec->dgid);
  1651. route->path_rec->hop_limit = 1;
  1652. route->path_rec->reversible = 1;
  1653. route->path_rec->pkey = cpu_to_be16(0xffff);
  1654. route->path_rec->mtu_selector = IB_SA_EQ;
  1655. route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
  1656. route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
  1657. route->path_rec->rate_selector = IB_SA_EQ;
  1658. route->path_rec->rate = iboe_get_rate(ndev);
  1659. dev_put(ndev);
  1660. route->path_rec->packet_life_time_selector = IB_SA_EQ;
  1661. route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
  1662. if (!route->path_rec->mtu) {
  1663. ret = -EINVAL;
  1664. goto err2;
  1665. }
  1666. work->old_state = RDMA_CM_ROUTE_QUERY;
  1667. work->new_state = RDMA_CM_ROUTE_RESOLVED;
  1668. work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
  1669. work->event.status = 0;
  1670. queue_work(cma_wq, &work->work);
  1671. return 0;
  1672. err2:
  1673. kfree(route->path_rec);
  1674. route->path_rec = NULL;
  1675. err1:
  1676. kfree(work);
  1677. return ret;
  1678. }
  1679. int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
  1680. {
  1681. struct rdma_id_private *id_priv;
  1682. int ret;
  1683. id_priv = container_of(id, struct rdma_id_private, id);
  1684. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
  1685. return -EINVAL;
  1686. atomic_inc(&id_priv->refcount);
  1687. switch (rdma_node_get_transport(id->device->node_type)) {
  1688. case RDMA_TRANSPORT_IB:
  1689. switch (rdma_port_get_link_layer(id->device, id->port_num)) {
  1690. case IB_LINK_LAYER_INFINIBAND:
  1691. ret = cma_resolve_ib_route(id_priv, timeout_ms);
  1692. break;
  1693. case IB_LINK_LAYER_ETHERNET:
  1694. ret = cma_resolve_iboe_route(id_priv);
  1695. break;
  1696. default:
  1697. ret = -ENOSYS;
  1698. }
  1699. break;
  1700. case RDMA_TRANSPORT_IWARP:
  1701. ret = cma_resolve_iw_route(id_priv, timeout_ms);
  1702. break;
  1703. default:
  1704. ret = -ENOSYS;
  1705. break;
  1706. }
  1707. if (ret)
  1708. goto err;
  1709. return 0;
  1710. err:
  1711. cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
  1712. cma_deref_id(id_priv);
  1713. return ret;
  1714. }
  1715. EXPORT_SYMBOL(rdma_resolve_route);
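/*
 * Loopback address resolution helpers: cma_bind_loopback() attaches the id
 * to the first device with an active port (or the first device found) and
 * cma_set_loopback() rewrites the source address to the family's loopback
 * value.
 */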
static void cma_set_loopback(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
                break;
        case AF_INET6:
                ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
                              0, 0, 0, htonl(1));
                break;
        default:
                ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
                            0, 0, 0, htonl(1));
                break;
        }
}
  1732. static int cma_bind_loopback(struct rdma_id_private *id_priv)
  1733. {
  1734. struct cma_device *cma_dev, *cur_dev;
  1735. struct ib_port_attr port_attr;
  1736. union ib_gid gid;
  1737. u16 pkey;
  1738. int ret;
  1739. u8 p;
  1740. cma_dev = NULL;
  1741. mutex_lock(&lock);
  1742. list_for_each_entry(cur_dev, &dev_list, list) {
  1743. if (cma_family(id_priv) == AF_IB &&
  1744. rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
  1745. continue;
  1746. if (!cma_dev)
  1747. cma_dev = cur_dev;
  1748. for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
  1749. if (!ib_query_port(cur_dev->device, p, &port_attr) &&
  1750. port_attr.state == IB_PORT_ACTIVE) {
  1751. cma_dev = cur_dev;
  1752. goto port_found;
  1753. }
  1754. }
  1755. }
  1756. if (!cma_dev) {
  1757. ret = -ENODEV;
  1758. goto out;
  1759. }
  1760. p = 1;
  1761. port_found:
  1762. ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
  1763. if (ret)
  1764. goto out;
  1765. ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
  1766. if (ret)
  1767. goto out;
  1768. id_priv->id.route.addr.dev_addr.dev_type =
  1769. (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
  1770. ARPHRD_INFINIBAND : ARPHRD_ETHER;
  1771. rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
  1772. ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
  1773. id_priv->id.port_num = p;
  1774. cma_attach_to_dev(id_priv, cma_dev);
  1775. cma_set_loopback(cma_src_addr(id_priv));
  1776. out:
  1777. mutex_unlock(&lock);
  1778. return ret;
  1779. }
  1780. static void addr_handler(int status, struct sockaddr *src_addr,
  1781. struct rdma_dev_addr *dev_addr, void *context)
  1782. {
  1783. struct rdma_id_private *id_priv = context;
  1784. struct rdma_cm_event event;
  1785. memset(&event, 0, sizeof event);
  1786. mutex_lock(&id_priv->handler_mutex);
  1787. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
  1788. RDMA_CM_ADDR_RESOLVED))
  1789. goto out;
  1790. memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
  1791. if (!status && !id_priv->cma_dev)
  1792. status = cma_acquire_dev(id_priv, NULL);
  1793. if (status) {
  1794. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
  1795. RDMA_CM_ADDR_BOUND))
  1796. goto out;
  1797. event.event = RDMA_CM_EVENT_ADDR_ERROR;
  1798. event.status = status;
  1799. } else
  1800. event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  1801. if (id_priv->id.event_handler(&id_priv->id, &event)) {
  1802. cma_exch(id_priv, RDMA_CM_DESTROYING);
  1803. mutex_unlock(&id_priv->handler_mutex);
  1804. cma_deref_id(id_priv);
  1805. rdma_destroy_id(&id_priv->id);
  1806. return;
  1807. }
  1808. out:
  1809. mutex_unlock(&id_priv->handler_mutex);
  1810. cma_deref_id(id_priv);
  1811. }
  1812. static int cma_resolve_loopback(struct rdma_id_private *id_priv)
  1813. {
  1814. struct cma_work *work;
  1815. union ib_gid gid;
  1816. int ret;
  1817. work = kzalloc(sizeof *work, GFP_KERNEL);
  1818. if (!work)
  1819. return -ENOMEM;
  1820. if (!id_priv->cma_dev) {
  1821. ret = cma_bind_loopback(id_priv);
  1822. if (ret)
  1823. goto err;
  1824. }
  1825. rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
  1826. rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
  1827. work->id = id_priv;
  1828. INIT_WORK(&work->work, cma_work_handler);
  1829. work->old_state = RDMA_CM_ADDR_QUERY;
  1830. work->new_state = RDMA_CM_ADDR_RESOLVED;
  1831. work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  1832. queue_work(cma_wq, &work->work);
  1833. return 0;
  1834. err:
  1835. kfree(work);
  1836. return ret;
  1837. }
  1838. static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
  1839. {
  1840. struct cma_work *work;
  1841. int ret;
  1842. work = kzalloc(sizeof *work, GFP_KERNEL);
  1843. if (!work)
  1844. return -ENOMEM;
  1845. if (!id_priv->cma_dev) {
  1846. ret = cma_resolve_ib_dev(id_priv);
  1847. if (ret)
  1848. goto err;
  1849. }
  1850. rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
  1851. &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
  1852. work->id = id_priv;
  1853. INIT_WORK(&work->work, cma_work_handler);
  1854. work->old_state = RDMA_CM_ADDR_QUERY;
  1855. work->new_state = RDMA_CM_ADDR_RESOLVED;
  1856. work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  1857. queue_work(cma_wq, &work->work);
  1858. return 0;
  1859. err:
  1860. kfree(work);
  1861. return ret;
  1862. }
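/*
 * If the caller did not supply a source address, bind to the wildcard
 * address in the destination's family, inheriting the IPv6 scope id or the
 * AF_IB pkey from the destination where relevant.
 */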
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                         struct sockaddr *dst_addr)
{
        if (!src_addr || !src_addr->sa_family) {
                src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                src_addr->sa_family = dst_addr->sa_family;
                if (dst_addr->sa_family == AF_INET6) {
                        ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
                                ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
                } else if (dst_addr->sa_family == AF_IB) {
                        ((struct sockaddr_ib *) src_addr)->sib_pkey =
                                ((struct sockaddr_ib *) dst_addr)->sib_pkey;
                }
        }
        return rdma_bind_addr(id, src_addr);
}
  1879. int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
  1880. struct sockaddr *dst_addr, int timeout_ms)
  1881. {
  1882. struct rdma_id_private *id_priv;
  1883. int ret;
  1884. id_priv = container_of(id, struct rdma_id_private, id);
  1885. if (id_priv->state == RDMA_CM_IDLE) {
  1886. ret = cma_bind_addr(id, src_addr, dst_addr);
  1887. if (ret)
  1888. return ret;
  1889. }
  1890. if (cma_family(id_priv) != dst_addr->sa_family)
  1891. return -EINVAL;
  1892. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
  1893. return -EINVAL;
  1894. atomic_inc(&id_priv->refcount);
  1895. memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
  1896. if (cma_any_addr(dst_addr)) {
  1897. ret = cma_resolve_loopback(id_priv);
  1898. } else {
  1899. if (dst_addr->sa_family == AF_IB) {
  1900. ret = cma_resolve_ib_addr(id_priv);
  1901. } else {
  1902. ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
  1903. dst_addr, &id->route.addr.dev_addr,
  1904. timeout_ms, addr_handler, id_priv);
  1905. }
  1906. }
  1907. if (ret)
  1908. goto err;
  1909. return 0;
  1910. err:
  1911. cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
  1912. cma_deref_id(id_priv);
  1913. return ret;
  1914. }
  1915. EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
        struct rdma_id_private *id_priv;
        unsigned long flags;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irqsave(&id_priv->lock, flags);
        if (reuse || id_priv->state == RDMA_CM_IDLE) {
                id_priv->reuseaddr = reuse;
                ret = 0;
        } else {
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
        struct rdma_id_private *id_priv;
        unsigned long flags;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irqsave(&id_priv->lock, flags);
        if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
                id_priv->options |= (1 << CMA_OPTION_AFONLY);
                id_priv->afonly = afonly;
                ret = 0;
        } else {
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
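/*
 * Record the port chosen for this id.  For IPv4/IPv6 the port is written
 * into the socket address; for AF_IB it is merged into the low 16 bits of
 * sib_sid (and the mask is widened to cover the whole service ID).  The id
 * is then added to the bind list's owner list.
 */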
static void cma_bind_port(struct rdma_bind_list *bind_list,
                          struct rdma_id_private *id_priv)
{
        struct sockaddr *addr;
        struct sockaddr_ib *sib;
        u64 sid, mask;
        __be16 port;

        addr = cma_src_addr(id_priv);
        port = htons(bind_list->port);

        switch (addr->sa_family) {
        case AF_INET:
                ((struct sockaddr_in *) addr)->sin_port = port;
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *) addr)->sin6_port = port;
                break;
        case AF_IB:
                sib = (struct sockaddr_ib *) addr;
                sid = be64_to_cpu(sib->sib_sid);
                mask = be64_to_cpu(sib->sib_sid_mask);
                sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
                sib->sib_sid_mask = cpu_to_be64(~0ULL);
                break;
        }
        id_priv->bind_list = bind_list;
        hlist_add_head(&id_priv->node, &bind_list->owners);
}
  1978. static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
  1979. unsigned short snum)
  1980. {
  1981. struct rdma_bind_list *bind_list;
  1982. int ret;
  1983. bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
  1984. if (!bind_list)
  1985. return -ENOMEM;
  1986. ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
  1987. if (ret < 0)
  1988. goto err;
  1989. bind_list->ps = ps;
  1990. bind_list->port = (unsigned short)ret;
  1991. cma_bind_port(bind_list, id_priv);
  1992. return 0;
  1993. err:
  1994. kfree(bind_list);
  1995. return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
  1996. }
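/*
 * Pick an ephemeral port: start from a random rover inside the local port
 * range and walk forward until an unused port is found, skipping the most
 * recently allocated port so it is not reused immediately.
 */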
  1997. static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
  1998. {
  1999. static unsigned int last_used_port;
  2000. int low, high, remaining;
  2001. unsigned int rover;
  2002. inet_get_local_port_range(&init_net, &low, &high);
  2003. remaining = (high - low) + 1;
  2004. rover = prandom_u32() % remaining + low;
  2005. retry:
  2006. if (last_used_port != rover &&
  2007. !idr_find(ps, (unsigned short) rover)) {
  2008. int ret = cma_alloc_port(ps, id_priv, rover);
  2009. /*
  2010. * Remember previously used port number in order to avoid
  2011. * re-using same port immediately after it is closed.
  2012. */
  2013. if (!ret)
  2014. last_used_port = rover;
  2015. if (ret != -EADDRNOTAVAIL)
  2016. return ret;
  2017. }
  2018. if (--remaining) {
  2019. rover++;
  2020. if ((rover < low) || (rover > high))
  2021. rover = low;
  2022. goto retry;
  2023. }
  2024. return -EADDRNOTAVAIL;
  2025. }
  2026. /*
  2027. * Check that the requested port is available. This is called when trying to
  2028. * bind to a specific port, or when trying to listen on a bound port. In
  2029. * the latter case, the provided id_priv may already be on the bind_list, but
  2030. * we still need to check that it's okay to start listening.
  2031. */
  2032. static int cma_check_port(struct rdma_bind_list *bind_list,
  2033. struct rdma_id_private *id_priv, uint8_t reuseaddr)
  2034. {
  2035. struct rdma_id_private *cur_id;
  2036. struct sockaddr *addr, *cur_addr;
  2037. addr = cma_src_addr(id_priv);
  2038. hlist_for_each_entry(cur_id, &bind_list->owners, node) {
  2039. if (id_priv == cur_id)
  2040. continue;
  2041. if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
  2042. cur_id->reuseaddr)
  2043. continue;
  2044. cur_addr = cma_src_addr(cur_id);
  2045. if (id_priv->afonly && cur_id->afonly &&
  2046. (addr->sa_family != cur_addr->sa_family))
  2047. continue;
  2048. if (cma_any_addr(addr) || cma_any_addr(cur_addr))
  2049. return -EADDRNOTAVAIL;
  2050. if (!cma_addr_cmp(addr, cur_addr))
  2051. return -EADDRINUSE;
  2052. }
  2053. return 0;
  2054. }
  2055. static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
  2056. {
  2057. struct rdma_bind_list *bind_list;
  2058. unsigned short snum;
  2059. int ret;
  2060. snum = ntohs(cma_port(cma_src_addr(id_priv)));
  2061. if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
  2062. return -EACCES;
  2063. bind_list = idr_find(ps, snum);
  2064. if (!bind_list) {
  2065. ret = cma_alloc_port(ps, id_priv, snum);
  2066. } else {
  2067. ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
  2068. if (!ret)
  2069. cma_bind_port(bind_list, id_priv);
  2070. }
  2071. return ret;
  2072. }
  2073. static int cma_bind_listen(struct rdma_id_private *id_priv)
  2074. {
  2075. struct rdma_bind_list *bind_list = id_priv->bind_list;
  2076. int ret = 0;
  2077. mutex_lock(&lock);
  2078. if (bind_list->owners.first->next)
  2079. ret = cma_check_port(bind_list, id_priv, 0);
  2080. mutex_unlock(&lock);
  2081. return ret;
  2082. }
static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
{
        switch (id_priv->id.ps) {
        case RDMA_PS_TCP:
                return &tcp_ps;
        case RDMA_PS_UDP:
                return &udp_ps;
        case RDMA_PS_IPOIB:
                return &ipoib_ps;
        case RDMA_PS_IB:
                return &ib_ps;
        default:
                return NULL;
        }
}
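/*
 * For AF_IB addresses the port space is encoded in the service ID itself.
 * Match sib_sid against the RDMA_IB_IP_PS_* prefixes permitted for the
 * id's port space and, on success, rewrite sib_sid/sib_sid_mask to the
 * canonical <prefix | port> form before returning the matching idr.
 */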
  2098. static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
  2099. {
  2100. struct idr *ps = NULL;
  2101. struct sockaddr_ib *sib;
  2102. u64 sid_ps, mask, sid;
  2103. sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
  2104. mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
  2105. sid = be64_to_cpu(sib->sib_sid) & mask;
  2106. if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
  2107. sid_ps = RDMA_IB_IP_PS_IB;
  2108. ps = &ib_ps;
  2109. } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
  2110. (sid == (RDMA_IB_IP_PS_TCP & mask))) {
  2111. sid_ps = RDMA_IB_IP_PS_TCP;
  2112. ps = &tcp_ps;
  2113. } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
  2114. (sid == (RDMA_IB_IP_PS_UDP & mask))) {
  2115. sid_ps = RDMA_IB_IP_PS_UDP;
  2116. ps = &udp_ps;
  2117. }
  2118. if (ps) {
  2119. sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
  2120. sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
  2121. be64_to_cpu(sib->sib_sid_mask));
  2122. }
  2123. return ps;
  2124. }
  2125. static int cma_get_port(struct rdma_id_private *id_priv)
  2126. {
  2127. struct idr *ps;
  2128. int ret;
  2129. if (cma_family(id_priv) != AF_IB)
  2130. ps = cma_select_inet_ps(id_priv);
  2131. else
  2132. ps = cma_select_ib_ps(id_priv);
  2133. if (!ps)
  2134. return -EPROTONOSUPPORT;
  2135. mutex_lock(&lock);
  2136. if (cma_any_port(cma_src_addr(id_priv)))
  2137. ret = cma_alloc_any_port(ps, id_priv);
  2138. else
  2139. ret = cma_use_port(ps, id_priv);
  2140. mutex_unlock(&lock);
  2141. return ret;
  2142. }
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
                               struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct sockaddr_in6 *sin6;

        if (addr->sa_family != AF_INET6)
                return 0;

        sin6 = (struct sockaddr_in6 *) addr;
        if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
                return 0;

        if (!sin6->sin6_scope_id)
                return -EINVAL;

        dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
        return 0;
}
  2159. int rdma_listen(struct rdma_cm_id *id, int backlog)
  2160. {
  2161. struct rdma_id_private *id_priv;
  2162. int ret;
  2163. id_priv = container_of(id, struct rdma_id_private, id);
  2164. if (id_priv->state == RDMA_CM_IDLE) {
  2165. id->route.addr.src_addr.ss_family = AF_INET;
  2166. ret = rdma_bind_addr(id, cma_src_addr(id_priv));
  2167. if (ret)
  2168. return ret;
  2169. }
  2170. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
  2171. return -EINVAL;
  2172. if (id_priv->reuseaddr) {
  2173. ret = cma_bind_listen(id_priv);
  2174. if (ret)
  2175. goto err;
  2176. }
  2177. id_priv->backlog = backlog;
  2178. if (id->device) {
  2179. switch (rdma_node_get_transport(id->device->node_type)) {
  2180. case RDMA_TRANSPORT_IB:
  2181. ret = cma_ib_listen(id_priv);
  2182. if (ret)
  2183. goto err;
  2184. break;
  2185. case RDMA_TRANSPORT_IWARP:
  2186. ret = cma_iw_listen(id_priv, backlog);
  2187. if (ret)
  2188. goto err;
  2189. break;
  2190. default:
  2191. ret = -ENOSYS;
  2192. goto err;
  2193. }
  2194. } else
  2195. cma_listen_on_all(id_priv);
  2196. return 0;
  2197. err:
  2198. id_priv->backlog = 0;
  2199. cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
  2200. return ret;
  2201. }
  2202. EXPORT_SYMBOL(rdma_listen);
  2203. int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
  2204. {
  2205. struct rdma_id_private *id_priv;
  2206. int ret;
  2207. if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
  2208. addr->sa_family != AF_IB)
  2209. return -EAFNOSUPPORT;
  2210. id_priv = container_of(id, struct rdma_id_private, id);
  2211. if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
  2212. return -EINVAL;
  2213. ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
  2214. if (ret)
  2215. goto err1;
  2216. memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
  2217. if (!cma_any_addr(addr)) {
  2218. ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
  2219. if (ret)
  2220. goto err1;
  2221. ret = cma_acquire_dev(id_priv, NULL);
  2222. if (ret)
  2223. goto err1;
  2224. }
  2225. if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
  2226. if (addr->sa_family == AF_INET)
  2227. id_priv->afonly = 1;
  2228. #if IS_ENABLED(CONFIG_IPV6)
  2229. else if (addr->sa_family == AF_INET6)
  2230. id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
  2231. #endif
  2232. }
  2233. ret = cma_get_port(id_priv);
  2234. if (ret)
  2235. goto err2;
  2236. return 0;
  2237. err2:
  2238. if (id_priv->cma_dev)
  2239. cma_release_dev(id_priv);
  2240. err1:
  2241. cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
  2242. return ret;
  2243. }
  2244. EXPORT_SYMBOL(rdma_bind_addr);
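/*
 * Write the CMA header that is prepended to the IB CM private data for the
 * IPv4/IPv6 port spaces: version, IP version nibble, source port and the
 * source/destination addresses.  The passive side parses this header to
 * recover the addressing carried in the REQ/SIDR_REQ (summary inferred
 * from the layout filled in below).
 */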
  2245. static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
  2246. {
  2247. struct cma_hdr *cma_hdr;
  2248. cma_hdr = hdr;
  2249. cma_hdr->cma_version = CMA_VERSION;
  2250. if (cma_family(id_priv) == AF_INET) {
  2251. struct sockaddr_in *src4, *dst4;
  2252. src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
  2253. dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
  2254. cma_set_ip_ver(cma_hdr, 4);
  2255. cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
  2256. cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
  2257. cma_hdr->port = src4->sin_port;
  2258. } else if (cma_family(id_priv) == AF_INET6) {
  2259. struct sockaddr_in6 *src6, *dst6;
  2260. src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
  2261. dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
  2262. cma_set_ip_ver(cma_hdr, 6);
  2263. cma_hdr->src_addr.ip6 = src6->sin6_addr;
  2264. cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
  2265. cma_hdr->port = src6->sin6_port;
  2266. }
  2267. return 0;
  2268. }
  2269. static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
  2270. struct ib_cm_event *ib_event)
  2271. {
  2272. struct rdma_id_private *id_priv = cm_id->context;
  2273. struct rdma_cm_event event;
  2274. struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
  2275. int ret = 0;
  2276. if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
  2277. return 0;
  2278. memset(&event, 0, sizeof event);
  2279. switch (ib_event->event) {
  2280. case IB_CM_SIDR_REQ_ERROR:
  2281. event.event = RDMA_CM_EVENT_UNREACHABLE;
  2282. event.status = -ETIMEDOUT;
  2283. break;
  2284. case IB_CM_SIDR_REP_RECEIVED:
  2285. event.param.ud.private_data = ib_event->private_data;
  2286. event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
  2287. if (rep->status != IB_SIDR_SUCCESS) {
  2288. event.event = RDMA_CM_EVENT_UNREACHABLE;
  2289. event.status = ib_event->param.sidr_rep_rcvd.status;
  2290. break;
  2291. }
  2292. ret = cma_set_qkey(id_priv, rep->qkey);
  2293. if (ret) {
  2294. event.event = RDMA_CM_EVENT_ADDR_ERROR;
  2295. event.status = ret;
  2296. break;
  2297. }
  2298. ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
  2299. id_priv->id.route.path_rec,
  2300. &event.param.ud.ah_attr);
  2301. event.param.ud.qp_num = rep->qpn;
  2302. event.param.ud.qkey = rep->qkey;
  2303. event.event = RDMA_CM_EVENT_ESTABLISHED;
  2304. event.status = 0;
  2305. break;
  2306. default:
  2307. printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
  2308. ib_event->event);
  2309. goto out;
  2310. }
  2311. ret = id_priv->id.event_handler(&id_priv->id, &event);
  2312. if (ret) {
  2313. /* Destroy the CM ID by returning a non-zero value. */
  2314. id_priv->cm_id.ib = NULL;
  2315. cma_exch(id_priv, RDMA_CM_DESTROYING);
  2316. mutex_unlock(&id_priv->handler_mutex);
  2317. rdma_destroy_id(&id_priv->id);
  2318. return ret;
  2319. }
  2320. out:
  2321. mutex_unlock(&id_priv->handler_mutex);
  2322. return ret;
  2323. }
  2324. static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
  2325. struct rdma_conn_param *conn_param)
  2326. {
  2327. struct ib_cm_sidr_req_param req;
  2328. struct ib_cm_id *id;
  2329. void *private_data;
  2330. int offset, ret;
  2331. memset(&req, 0, sizeof req);
  2332. offset = cma_user_data_offset(id_priv);
  2333. req.private_data_len = offset + conn_param->private_data_len;
  2334. if (req.private_data_len < conn_param->private_data_len)
  2335. return -EINVAL;
  2336. if (req.private_data_len) {
  2337. private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
  2338. if (!private_data)
  2339. return -ENOMEM;
  2340. } else {
  2341. private_data = NULL;
  2342. }
  2343. if (conn_param->private_data && conn_param->private_data_len)
  2344. memcpy(private_data + offset, conn_param->private_data,
  2345. conn_param->private_data_len);
  2346. if (private_data) {
  2347. ret = cma_format_hdr(private_data, id_priv);
  2348. if (ret)
  2349. goto out;
  2350. req.private_data = private_data;
  2351. }
  2352. id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
  2353. id_priv);
  2354. if (IS_ERR(id)) {
  2355. ret = PTR_ERR(id);
  2356. goto out;
  2357. }
  2358. id_priv->cm_id.ib = id;
  2359. req.path = id_priv->id.route.path_rec;
  2360. req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  2361. req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
  2362. req.max_cm_retries = CMA_MAX_CM_RETRIES;
  2363. ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
  2364. if (ret) {
  2365. ib_destroy_cm_id(id_priv->cm_id.ib);
  2366. id_priv->cm_id.ib = NULL;
  2367. }
  2368. out:
  2369. kfree(private_data);
  2370. return ret;
  2371. }
  2372. static int cma_connect_ib(struct rdma_id_private *id_priv,
  2373. struct rdma_conn_param *conn_param)
  2374. {
  2375. struct ib_cm_req_param req;
  2376. struct rdma_route *route;
  2377. void *private_data;
  2378. struct ib_cm_id *id;
  2379. int offset, ret;
  2380. memset(&req, 0, sizeof req);
  2381. offset = cma_user_data_offset(id_priv);
  2382. req.private_data_len = offset + conn_param->private_data_len;
  2383. if (req.private_data_len < conn_param->private_data_len)
  2384. return -EINVAL;
  2385. if (req.private_data_len) {
  2386. private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
  2387. if (!private_data)
  2388. return -ENOMEM;
  2389. } else {
  2390. private_data = NULL;
  2391. }
  2392. if (conn_param->private_data && conn_param->private_data_len)
  2393. memcpy(private_data + offset, conn_param->private_data,
  2394. conn_param->private_data_len);
  2395. id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
  2396. if (IS_ERR(id)) {
  2397. ret = PTR_ERR(id);
  2398. goto out;
  2399. }
  2400. id_priv->cm_id.ib = id;
  2401. route = &id_priv->id.route;
  2402. if (private_data) {
  2403. ret = cma_format_hdr(private_data, id_priv);
  2404. if (ret)
  2405. goto out;
  2406. req.private_data = private_data;
  2407. }
  2408. req.primary_path = &route->path_rec[0];
  2409. if (route->num_paths == 2)
  2410. req.alternate_path = &route->path_rec[1];
  2411. req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  2412. req.qp_num = id_priv->qp_num;
  2413. req.qp_type = id_priv->id.qp_type;
  2414. req.starting_psn = id_priv->seq_num;
  2415. req.responder_resources = conn_param->responder_resources;
  2416. req.initiator_depth = conn_param->initiator_depth;
  2417. req.flow_control = conn_param->flow_control;
  2418. req.retry_count = min_t(u8, 7, conn_param->retry_count);
  2419. req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
  2420. req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
  2421. req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
  2422. req.max_cm_retries = CMA_MAX_CM_RETRIES;
  2423. req.srq = id_priv->srq ? 1 : 0;
  2424. ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
  2425. out:
  2426. if (ret && !IS_ERR(id)) {
  2427. ib_destroy_cm_id(id);
  2428. id_priv->cm_id.ib = NULL;
  2429. }
  2430. kfree(private_data);
  2431. return ret;
  2432. }
  2433. static int cma_connect_iw(struct rdma_id_private *id_priv,
  2434. struct rdma_conn_param *conn_param)
  2435. {
  2436. struct iw_cm_id *cm_id;
  2437. int ret;
  2438. struct iw_cm_conn_param iw_param;
  2439. cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
  2440. if (IS_ERR(cm_id))
  2441. return PTR_ERR(cm_id);
  2442. id_priv->cm_id.iw = cm_id;
  2443. memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
  2444. rdma_addr_size(cma_src_addr(id_priv)));
  2445. memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
  2446. rdma_addr_size(cma_dst_addr(id_priv)));
  2447. ret = cma_modify_qp_rtr(id_priv, conn_param);
  2448. if (ret)
  2449. goto out;
  2450. if (conn_param) {
  2451. iw_param.ord = conn_param->initiator_depth;
  2452. iw_param.ird = conn_param->responder_resources;
  2453. iw_param.private_data = conn_param->private_data;
  2454. iw_param.private_data_len = conn_param->private_data_len;
  2455. iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
  2456. } else {
  2457. memset(&iw_param, 0, sizeof iw_param);
  2458. iw_param.qpn = id_priv->qp_num;
  2459. }
  2460. ret = iw_cm_connect(cm_id, &iw_param);
  2461. out:
  2462. if (ret) {
  2463. iw_destroy_cm_id(cm_id);
  2464. id_priv->cm_id.iw = NULL;
  2465. }
  2466. return ret;
  2467. }
  2468. int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
  2469. {
  2470. struct rdma_id_private *id_priv;
  2471. int ret;
  2472. id_priv = container_of(id, struct rdma_id_private, id);
  2473. if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
  2474. return -EINVAL;
  2475. if (!id->qp) {
  2476. id_priv->qp_num = conn_param->qp_num;
  2477. id_priv->srq = conn_param->srq;
  2478. }
  2479. switch (rdma_node_get_transport(id->device->node_type)) {
  2480. case RDMA_TRANSPORT_IB:
  2481. if (id->qp_type == IB_QPT_UD)
  2482. ret = cma_resolve_ib_udp(id_priv, conn_param);
  2483. else
  2484. ret = cma_connect_ib(id_priv, conn_param);
  2485. break;
  2486. case RDMA_TRANSPORT_IWARP:
  2487. ret = cma_connect_iw(id_priv, conn_param);
  2488. break;
  2489. default:
  2490. ret = -ENOSYS;
  2491. break;
  2492. }
  2493. if (ret)
  2494. goto err;
  2495. return 0;
  2496. err:
  2497. cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
  2498. return ret;
  2499. }
  2500. EXPORT_SYMBOL(rdma_connect);
  2501. static int cma_accept_ib(struct rdma_id_private *id_priv,
  2502. struct rdma_conn_param *conn_param)
  2503. {
  2504. struct ib_cm_rep_param rep;
  2505. int ret;
  2506. ret = cma_modify_qp_rtr(id_priv, conn_param);
  2507. if (ret)
  2508. goto out;
  2509. ret = cma_modify_qp_rts(id_priv, conn_param);
  2510. if (ret)
  2511. goto out;
  2512. memset(&rep, 0, sizeof rep);
  2513. rep.qp_num = id_priv->qp_num;
  2514. rep.starting_psn = id_priv->seq_num;
  2515. rep.private_data = conn_param->private_data;
  2516. rep.private_data_len = conn_param->private_data_len;
  2517. rep.responder_resources = conn_param->responder_resources;
  2518. rep.initiator_depth = conn_param->initiator_depth;
  2519. rep.failover_accepted = 0;
  2520. rep.flow_control = conn_param->flow_control;
  2521. rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
  2522. rep.srq = id_priv->srq ? 1 : 0;
  2523. ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
  2524. out:
  2525. return ret;
  2526. }
  2527. static int cma_accept_iw(struct rdma_id_private *id_priv,
  2528. struct rdma_conn_param *conn_param)
  2529. {
  2530. struct iw_cm_conn_param iw_param;
  2531. int ret;
  2532. ret = cma_modify_qp_rtr(id_priv, conn_param);
  2533. if (ret)
  2534. return ret;
  2535. iw_param.ord = conn_param->initiator_depth;
  2536. iw_param.ird = conn_param->responder_resources;
  2537. iw_param.private_data = conn_param->private_data;
  2538. iw_param.private_data_len = conn_param->private_data_len;
  2539. if (id_priv->id.qp) {
  2540. iw_param.qpn = id_priv->qp_num;
  2541. } else
  2542. iw_param.qpn = conn_param->qp_num;
  2543. return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
  2544. }
  2545. static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
  2546. enum ib_cm_sidr_status status, u32 qkey,
  2547. const void *private_data, int private_data_len)
  2548. {
  2549. struct ib_cm_sidr_rep_param rep;
  2550. int ret;
  2551. memset(&rep, 0, sizeof rep);
  2552. rep.status = status;
  2553. if (status == IB_SIDR_SUCCESS) {
  2554. ret = cma_set_qkey(id_priv, qkey);
  2555. if (ret)
  2556. return ret;
  2557. rep.qp_num = id_priv->qp_num;
  2558. rep.qkey = id_priv->qkey;
  2559. }
  2560. rep.private_data = private_data;
  2561. rep.private_data_len = private_data_len;
  2562. return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
  2563. }
  2564. int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
  2565. {
  2566. struct rdma_id_private *id_priv;
  2567. int ret;
  2568. id_priv = container_of(id, struct rdma_id_private, id);
  2569. id_priv->owner = task_pid_nr(current);
  2570. if (!cma_comp(id_priv, RDMA_CM_CONNECT))
  2571. return -EINVAL;
  2572. if (!id->qp && conn_param) {
  2573. id_priv->qp_num = conn_param->qp_num;
  2574. id_priv->srq = conn_param->srq;
  2575. }
  2576. switch (rdma_node_get_transport(id->device->node_type)) {
  2577. case RDMA_TRANSPORT_IB:
  2578. if (id->qp_type == IB_QPT_UD) {
  2579. if (conn_param)
  2580. ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
  2581. conn_param->qkey,
  2582. conn_param->private_data,
  2583. conn_param->private_data_len);
  2584. else
  2585. ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
  2586. 0, NULL, 0);
  2587. } else {
  2588. if (conn_param)
  2589. ret = cma_accept_ib(id_priv, conn_param);
  2590. else
  2591. ret = cma_rep_recv(id_priv);
  2592. }
  2593. break;
  2594. case RDMA_TRANSPORT_IWARP:
  2595. ret = cma_accept_iw(id_priv, conn_param);
  2596. break;
  2597. default:
  2598. ret = -ENOSYS;
  2599. break;
  2600. }
  2601. if (ret)
  2602. goto reject;
  2603. return 0;
  2604. reject:
  2605. cma_modify_qp_err(id_priv);
  2606. rdma_reject(id, NULL, 0);
  2607. return ret;
  2608. }
  2609. EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!id_priv->cm_id.ib)
                return -EINVAL;

        switch (id->device->node_type) {
        case RDMA_NODE_IB_CA:
                ret = ib_cm_notify(id_priv->cm_id.ib, event);
                break;
        default:
                ret = 0;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(rdma_notify);
  2628. int rdma_reject(struct rdma_cm_id *id, const void *private_data,
  2629. u8 private_data_len)
  2630. {
  2631. struct rdma_id_private *id_priv;
  2632. int ret;
  2633. id_priv = container_of(id, struct rdma_id_private, id);
  2634. if (!id_priv->cm_id.ib)
  2635. return -EINVAL;
  2636. switch (rdma_node_get_transport(id->device->node_type)) {
  2637. case RDMA_TRANSPORT_IB:
  2638. if (id->qp_type == IB_QPT_UD)
  2639. ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
  2640. private_data, private_data_len);
  2641. else
  2642. ret = ib_send_cm_rej(id_priv->cm_id.ib,
  2643. IB_CM_REJ_CONSUMER_DEFINED, NULL,
  2644. 0, private_data, private_data_len);
  2645. break;
  2646. case RDMA_TRANSPORT_IWARP:
  2647. ret = iw_cm_reject(id_priv->cm_id.iw,
  2648. private_data, private_data_len);
  2649. break;
  2650. default:
  2651. ret = -ENOSYS;
  2652. break;
  2653. }
  2654. return ret;
  2655. }
  2656. EXPORT_SYMBOL(rdma_reject);
  2657. int rdma_disconnect(struct rdma_cm_id *id)
  2658. {
  2659. struct rdma_id_private *id_priv;
  2660. int ret;
  2661. id_priv = container_of(id, struct rdma_id_private, id);
  2662. if (!id_priv->cm_id.ib)
  2663. return -EINVAL;
  2664. switch (rdma_node_get_transport(id->device->node_type)) {
  2665. case RDMA_TRANSPORT_IB:
  2666. ret = cma_modify_qp_err(id_priv);
  2667. if (ret)
  2668. goto out;
  2669. /* Initiate or respond to a disconnect. */
  2670. if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
  2671. ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
  2672. break;
  2673. case RDMA_TRANSPORT_IWARP:
  2674. ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
  2675. break;
  2676. default:
  2677. ret = -EINVAL;
  2678. break;
  2679. }
  2680. out:
  2681. return ret;
  2682. }
  2683. EXPORT_SYMBOL(rdma_disconnect);
  2684. static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
  2685. {
  2686. struct rdma_id_private *id_priv;
  2687. struct cma_multicast *mc = multicast->context;
  2688. struct rdma_cm_event event;
  2689. int ret;
  2690. id_priv = mc->id_priv;
  2691. if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
  2692. cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
  2693. return 0;
  2694. if (!status)
  2695. status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
  2696. mutex_lock(&id_priv->qp_mutex);
  2697. if (!status && id_priv->id.qp)
  2698. status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
  2699. be16_to_cpu(multicast->rec.mlid));
  2700. mutex_unlock(&id_priv->qp_mutex);
  2701. memset(&event, 0, sizeof event);
  2702. event.status = status;
  2703. event.param.ud.private_data = mc->context;
  2704. if (!status) {
  2705. event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
  2706. ib_init_ah_from_mcmember(id_priv->id.device,
  2707. id_priv->id.port_num, &multicast->rec,
  2708. &event.param.ud.ah_attr);
  2709. event.param.ud.qp_num = 0xFFFFFF;
  2710. event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
  2711. } else
  2712. event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
  2713. ret = id_priv->id.event_handler(&id_priv->id, &event);
  2714. if (ret) {
  2715. cma_exch(id_priv, RDMA_CM_DESTROYING);
  2716. mutex_unlock(&id_priv->handler_mutex);
  2717. rdma_destroy_id(&id_priv->id);
  2718. return 0;
  2719. }
  2720. mutex_unlock(&id_priv->handler_mutex);
  2721. return 0;
  2722. }
  2723. static void cma_set_mgid(struct rdma_id_private *id_priv,
  2724. struct sockaddr *addr, union ib_gid *mgid)
  2725. {
  2726. unsigned char mc_map[MAX_ADDR_LEN];
  2727. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  2728. struct sockaddr_in *sin = (struct sockaddr_in *) addr;
  2729. struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
  2730. if (cma_any_addr(addr)) {
  2731. memset(mgid, 0, sizeof *mgid);
  2732. } else if ((addr->sa_family == AF_INET6) &&
  2733. ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
  2734. 0xFF10A01B)) {
  2735. /* IPv6 address is an SA assigned MGID. */
  2736. memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
  2737. } else if (addr->sa_family == AF_IB) {
  2738. memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
  2739. } else if ((addr->sa_family == AF_INET6)) {
  2740. ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
  2741. if (id_priv->id.ps == RDMA_PS_UDP)
  2742. mc_map[7] = 0x01; /* Use RDMA CM signature */
  2743. *mgid = *(union ib_gid *) (mc_map + 4);
  2744. } else {
  2745. ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
  2746. if (id_priv->id.ps == RDMA_PS_UDP)
  2747. mc_map[7] = 0x01; /* Use RDMA CM signature */
  2748. *mgid = *(union ib_gid *) (mc_map + 4);
  2749. }
  2750. }
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

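/*
 * RoCE (IBoE) joins are completed locally rather than by the SA, so the
 * "join complete" notification is delivered from a work item that invokes
 * the same handler used for IB joins.
 */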
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

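/*
 * Build the MGID used on RoCE links: a wildcard address yields a zero MGID,
 * an IPv6 address maps directly, and an IPv4 address is embedded in the low
 * 32 bits of an ff0e::ffff:<addr> multicast GID.
 */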
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

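/*
 * "Join" a multicast group on a RoCE link.  There is no SA to consult, so
 * the member record is filled in locally from the bound net_device (rate,
 * MTU, hop limit, port GID) and completion is reported from a queued work
 * item.
 */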
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);

	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

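/*
 * Join the multicast group identified by addr and report the result through
 * the rdma_cm_id's event handler.  The id must have its address bound or
 * resolved; the join path is chosen by the port's link layer (IB vs. RoCE).
 */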
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

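/*
 * Leave a previously joined multicast group: detach the QP if one is
 * attached, then release the membership matching the given address.
 */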
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

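/*
 * If a bonding failover changed the hardware address behind the interface
 * this id is bound to, queue work to report RDMA_CM_EVENT_ADDR_CHANGE.
 */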
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

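/*
 * Netdevice notifier: on a bonding failover of a bonding master, scan every
 * tracked id and let cma_netdev_change() decide which ones need an
 * address-change event.
 */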
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

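/*
 * IB client add callback: start tracking the new device and create listeners
 * on it for every id listening on the wildcard address.
 */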
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

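/*
 * Report RDMA_CM_EVENT_DEVICE_REMOVAL to a single id.  Returns nonzero if
 * the event handler asked for the id to be destroyed.
 */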
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

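/*
 * Tear down every id bound to a departing device, then wait for all
 * remaining references to the cma_device to be dropped.
 */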
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

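/*
 * IB client remove callback: unlink the device from the global list and tear
 * down every id that was using it.
 */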
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

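/*
 * Netlink dump callback for RDMA_NL_RDMA_CM_ID_STATS: emit one message per
 * id.  cb->args[0] and cb->args[1] record the device and id positions so an
 * interrupted dump can be resumed.
 */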
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid		= id_priv->owner;
			id_stats->port_space	= id->ps;
			id_stats->cm_state	= id_priv->state;
			id_stats->qp_num	= id_priv->qp_num;
			id_stats->qp_type	= id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

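/*
 * Module init: create the single-threaded workqueue, register with the SA,
 * address-resolution, netdevice-notifier and IB client frameworks, then add
 * the netlink stats callback.
 */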
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

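/*
 * Module exit: unwind everything registered in cma_init() and release the
 * port-space IDRs.
 */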
static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);