uverbs_cmd.c

/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
        struct lock_class_key key;
        char name[16];
};

static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
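
/*
 * Illustrative sketch (not part of the command flow below; it just
 * combines the helpers defined later in this file): a handler that
 * only reads an object pins it for the duration of the call,
 *
 *         uobj = idr_read_uobj(idr, handle, context, 0);
 *         if (!uobj)
 *                 return -EINVAL;
 *         ... use uobj->object under the read side of uobj->mutex ...
 *         put_uobj_read(uobj);
 *
 * while create/destroy paths take the write side with down_write()
 * and flip uobj->live underneath it.
 */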
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
                      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
        uobj->user_handle = user_handle;
        uobj->context = context;
        kref_init(&uobj->ref);
        init_rwsem(&uobj->mutex);
        lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
        uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
        kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
        kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
        up_read(&uobj->mutex);
        put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
        up_write(&uobj->mutex);
        put_uobj(uobj);
}
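
/*
 * Allocate an idr id for uobj and store it in uobj->id.  idr_preload()
 * fills the idr cache with GFP_KERNEL up front so that the GFP_NOWAIT
 * allocation made under the ib_uverbs_idr_lock spinlock cannot sleep.
 */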
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(&ib_uverbs_idr_lock);

        ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
        if (ret >= 0)
                uobj->id = ret;

        spin_unlock(&ib_uverbs_idr_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        spin_lock(&ib_uverbs_idr_lock);
        idr_remove(idr, uobj->id);
        spin_unlock(&ib_uverbs_idr_lock);
}
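
/*
 * Look up an object by id and take a reference on its kref, but only if
 * it belongs to the caller's ucontext; a handle from another context is
 * treated as invalid and NULL is returned.
 */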
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        spin_lock(&ib_uverbs_idr_lock);
        uobj = idr_find(idr, id);
        if (uobj) {
                if (uobj->context == context)
                        kref_get(&uobj->ref);
                else
                        uobj = NULL;
        }
        spin_unlock(&ib_uverbs_idr_lock);

        return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
                                        struct ib_ucontext *context, int nested)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        if (nested)
                down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
        else
                down_read(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_read(uobj);
                return NULL;
        }

        return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        down_write(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_write(uobj);
                return NULL;
        }

        return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
                          int nested)
{
        struct ib_uobject *uobj;

        uobj = idr_read_uobj(idr, id, context, nested);
        return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
        put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
        return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
        put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
        put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
        return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
        put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
        put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
        put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
                                     struct ib_uobject **uobj)
{
        *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
        return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
        put_uobj_read(uobj);
}
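
/*
 * GET_CONTEXT: create the ib_ucontext for this uverbs file and return
 * an fd for its async event file to userspace.  A file may own at most
 * one context, which is what the -EINVAL check under file->mutex
 * enforces.
 */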
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata udata;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct ib_device_attr dev_attr;
#endif
        struct ib_ucontext *ucontext;
        struct file *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto err;
        }

        ucontext->device = ib_dev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);
        INIT_LIST_HEAD(&ucontext->xrcd_list);
        INIT_LIST_HEAD(&ucontext->rule_list);
        rcu_read_lock();
        ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();
        ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        ucontext->umem_tree = RB_ROOT;
        init_rwsem(&ucontext->umem_rwsem);
        ucontext->odp_mrs_count = 0;
        INIT_LIST_HEAD(&ucontext->no_private_counters);

        ret = ib_query_device(ib_dev, &dev_attr);
        if (ret)
                goto err_free;
        if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
                ucontext->invalidate_range = NULL;
#endif

        resp.num_comp_vectors = file->device->num_comp_vectors;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                goto err_free;
        resp.async_fd = ret;

        filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_fd;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_file;
        }

        file->ucontext = ucontext;

        fd_install(resp.async_fd, filp);

        mutex_unlock(&file->mutex);

        return in_len;

err_file:
        ib_uverbs_free_async_event_file(file);
        fput(filp);

err_fd:
        put_unused_fd(resp.async_fd);

err_free:
        put_pid(ucontext->tgid);
        ib_dev->dealloc_ucontext(ucontext);

err:
        mutex_unlock(&file->mutex);
        return ret;
}

static void copy_query_dev_fields(struct ib_uverbs_file *file,
                                  struct ib_device *ib_dev,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
        resp->fw_ver = attr->fw_ver;
        resp->node_guid = ib_dev->node_guid;
        resp->sys_image_guid = attr->sys_image_guid;
        resp->max_mr_size = attr->max_mr_size;
        resp->page_size_cap = attr->page_size_cap;
        resp->vendor_id = attr->vendor_id;
        resp->vendor_part_id = attr->vendor_part_id;
        resp->hw_ver = attr->hw_ver;
        resp->max_qp = attr->max_qp;
        resp->max_qp_wr = attr->max_qp_wr;
        resp->device_cap_flags = attr->device_cap_flags;
        resp->max_sge = attr->max_sge;
        resp->max_sge_rd = attr->max_sge_rd;
        resp->max_cq = attr->max_cq;
        resp->max_cqe = attr->max_cqe;
        resp->max_mr = attr->max_mr;
        resp->max_pd = attr->max_pd;
        resp->max_qp_rd_atom = attr->max_qp_rd_atom;
        resp->max_ee_rd_atom = attr->max_ee_rd_atom;
        resp->max_res_rd_atom = attr->max_res_rd_atom;
        resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
        resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
        resp->atomic_cap = attr->atomic_cap;
        resp->max_ee = attr->max_ee;
        resp->max_rdd = attr->max_rdd;
        resp->max_mw = attr->max_mw;
        resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
        resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
        resp->max_mcast_grp = attr->max_mcast_grp;
        resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
        resp->max_ah = attr->max_ah;
        resp->max_fmr = attr->max_fmr;
        resp->max_map_per_fmr = attr->max_map_per_fmr;
        resp->max_srq = attr->max_srq;
        resp->max_srq_wr = attr->max_srq_wr;
        resp->max_srq_sge = attr->max_srq_sge;
        resp->max_pkeys = attr->max_pkeys;
        resp->local_ca_ack_delay = attr->local_ca_ack_delay;
        resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               struct ib_device *ib_dev,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_query_dev_fields(file, ib_dev, &resp, &attr);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state = attr.state;
        resp.max_mtu = attr.max_mtu;
        resp.active_mtu = attr.active_mtu;
        resp.gid_tbl_len = attr.gid_tbl_len;
        resp.port_cap_flags = attr.port_cap_flags;
        resp.max_msg_sz = attr.max_msg_sz;
        resp.bad_pkey_cntr = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr = attr.qkey_viol_cntr;
        resp.pkey_tbl_len = attr.pkey_tbl_len;
        resp.lid = attr.lid;
        resp.sm_lid = attr.sm_lid;
        resp.lmc = attr.lmc;
        resp.max_vl_num = attr.max_vl_num;
        resp.sm_sl = attr.sm_sl;
        resp.subnet_timeout = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width = attr.active_width;
        resp.active_speed = attr.active_speed;
        resp.phys_state = attr.phys_state;
        resp.link_layer = rdma_port_get_link_layer(ib_dev,
                                                   cmd.port_num);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
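
/*
 * ALLOC_PD also illustrates the creation pattern shared by the other
 * object-creating commands below: allocate the uobject, create the
 * verbs object while holding the uobject's rwsem for writing, publish
 * it in the idr, copy the handle back to userspace, and only then link
 * the uobject into the ucontext's list and mark it live.
 */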
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata udata;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
        down_write(&uobj->mutex);

        pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device = ib_dev;
        pd->uobject = uobj;
        pd->local_mr = NULL;
        atomic_set(&pd->usecnt, 0);

        uobj->object = pd;
        ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
        ib_dealloc_pd(pd);

err:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        pd = uobj->object;

        if (atomic_read(&pd->usecnt)) {
                ret = -EBUSY;
                goto err_put;
        }

        ret = pd->device->dealloc_pd(uobj->object);
        WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
        if (ret)
                goto err_put;

        uobj->live = 0;
        put_uobj_write(uobj);

        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;

err_put:
        put_uobj_write(uobj);
        return ret;
}
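
/*
 * An XRC domain can be shared by passing an fd whose inode identifies
 * the domain.  The rb-tree below maps inodes to xrcds so that opening
 * the same file again finds the existing domain instead of allocating
 * a new one.
 */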
struct xrcd_table_entry {
        struct rb_node node;
        struct ib_xrcd *xrcd;
        struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                             struct inode *inode,
                             struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        igrab(inode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_open_xrcd cmd;
        struct ib_uverbs_open_xrcd_resp resp;
        struct ib_udata udata;
        struct ib_uxrcd_object *obj;
        struct ib_xrcd *xrcd = NULL;
        struct fd f = {NULL, 0};
        struct inode *inode = NULL;
        int ret = 0;
        int new_xrcd = 0;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        mutex_lock(&file->device->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj) {
                ret = -ENOMEM;
                goto err_tree_mutex_unlock;
        }

        init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
        down_write(&obj->uobject.mutex);

        if (!xrcd) {
                xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }

                xrcd->inode = inode;
                xrcd->device = ib_dev;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;
        ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.xrcd_handle = obj->uobject.id;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(file->device, inode, xrcd);
                        if (ret)
                                goto err_insert_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (f.file)
                fdput(f);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;
        up_write(&obj->uobject.mutex);

        mutex_unlock(&file->device->xrcd_tree_mutex);
        return in_len;

err_copy:
        if (inode) {
                if (new_xrcd)
                        xrcd_table_delete(file->device, inode);
                atomic_dec(&xrcd->usecnt);
        }

err_insert_xrcd:
        idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
        ib_dealloc_xrcd(xrcd);

err:
        put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&file->device->xrcd_tree_mutex);

        return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_close_xrcd cmd;
        struct ib_uobject *uobj;
        struct ib_xrcd *xrcd = NULL;
        struct inode *inode = NULL;
        struct ib_uxrcd_object *obj;
        int live;
        int ret = 0;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->device->xrcd_tree_mutex);
        uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
        if (!uobj) {
                ret = -EINVAL;
                goto out;
        }

        xrcd = uobj->object;
        inode = xrcd->inode;
        obj = container_of(uobj, struct ib_uxrcd_object, uobject);
        if (atomic_read(&obj->refcnt)) {
                put_uobj_write(uobj);
                ret = -EBUSY;
                goto out;
        }

        if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
                ret = ib_dealloc_xrcd(uobj->object);
                if (!ret)
                        uobj->live = 0;
        }

        live = uobj->live;
        if (inode && ret)
                atomic_inc(&xrcd->usecnt);

        put_uobj_write(uobj);

        if (ret)
                goto out;

        if (inode && !live)
                xrcd_table_delete(file->device, inode);

        idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);
        ret = in_len;

out:
        mutex_unlock(&file->device->xrcd_tree_mutex);
        return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
                            struct ib_xrcd *xrcd)
{
        struct inode *inode;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return;

        ib_dealloc_xrcd(xrcd);

        if (inode)
                xrcd_table_delete(dev, inode);
}
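
/*
 * REG_MR: the ~PAGE_MASK check below rejects requests in which
 * cmd.start and cmd.hca_va do not share the same offset within a page.
 */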
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         struct ib_device *ib_dev,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata udata;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        struct ib_mr *mr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
                return ret;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
                struct ib_device_attr attr;

                ret = ib_query_device(pd->device, &attr);
                if (ret || !(attr.device_cap_flags &
                             IB_DEVICE_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        ret = -EINVAL;
                        goto err_put;
                }
        }

        mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                     cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device = pd->device;
        mr->pd = pd;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        uobj->object = mr;
        ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
        if (ret)
                goto err_unreg;

        memset(&resp, 0, sizeof resp);
        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;
        resp.mr_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mr_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
        ib_dereg_mr(mr);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_rereg_mr cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_udata udata;
        struct ib_pd *pd = NULL;
        struct ib_mr *mr;
        struct ib_pd *old_pd;
        int ret;
        struct ib_uobject *uobj;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof(cmd),
                   (unsigned long) cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
                return -EINVAL;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
             (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
                return -EINVAL;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
                              file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_PD) {
                pd = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        }

        if (atomic_read(&mr->usecnt)) {
                ret = -EBUSY;
                goto put_uobj_pd;
        }

        old_pd = mr->pd;
        ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
                                        cmd.length, cmd.hca_va,
                                        cmd.access_flags, pd, &udata);
        if (!ret) {
                if (cmd.flags & IB_MR_REREG_PD) {
                        atomic_inc(&pd->usecnt);
                        mr->pd = pd;
                        atomic_dec(&old_pd->usecnt);
                }
        } else {
                goto put_uobj_pd;
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
        else
                ret = in_len;

put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                put_pd_read(pd);

put_uobjs:
        put_uobj_write(mr->uobject);

        return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr *mr;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        ret = ib_dereg_mr(mr);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_alloc_mw cmd;
        struct ib_uverbs_alloc_mw_resp resp;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        struct ib_mw *mw;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mw = pd->device->alloc_mw(pd, cmd.mw_type);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
        }

        mw->device = pd->device;
        mw->pd = pd;
        mw->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
        if (ret)
                goto err_unalloc;

        memset(&resp, 0, sizeof(resp));
        resp.rkey = mw->rkey;
        resp.mw_handle = uobj->id;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mw_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
        ib_dealloc_mw(mw);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_dealloc_mw cmd;
        struct ib_mw *mw;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mw = uobj->object;

        ret = ib_dealloc_mw(mw);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                      struct ib_device *ib_dev,
                                      const char __user *buf, int in_len,
                                      int out_len)
{
        struct ib_uverbs_create_comp_channel cmd;
        struct ib_uverbs_create_comp_channel_resp resp;
        struct file *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                return ret;
        resp.fd = ret;

        filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
        if (IS_ERR(filp)) {
                put_unused_fd(resp.fd);
                return PTR_ERR(filp);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                put_unused_fd(resp.fd);
                fput(filp);
                return -EFAULT;
        }

        fd_install(resp.fd, filp);
        return in_len;
}
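
/*
 * create_cq() holds the CQ-creation logic common to the legacy
 * CREATE_CQ command and the extended EX_CREATE_CQ command; the two
 * differ only in how much of the command structure they accept
 * (cmd_sz) and in the callback (cb) used to copy the response back
 * to userspace.
 */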
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
                                       struct ib_device *ib_dev,
                                       struct ib_udata *ucore,
                                       struct ib_udata *uhw,
                                       struct ib_uverbs_ex_create_cq *cmd,
                                       size_t cmd_sz,
                                       int (*cb)(struct ib_uverbs_file *file,
                                                 struct ib_ucq_object *obj,
                                                 struct ib_uverbs_ex_create_cq_resp *resp,
                                                 struct ib_udata *udata,
                                                 void *context),
                                       void *context)
{
        struct ib_ucq_object *obj;
        struct ib_uverbs_event_file *ev_file = NULL;
        struct ib_cq *cq;
        int ret;
        struct ib_uverbs_ex_create_cq_resp resp;
        struct ib_cq_init_attr attr = {};

        if (cmd->comp_vector >= file->device->num_comp_vectors)
                return ERR_PTR(-EINVAL);

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
        down_write(&obj->uobject.mutex);

        if (cmd->comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
                if (!ev_file) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        obj->uverbs_file = file;
        obj->comp_events_reported = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        attr.cqe = cmd->cqe;
        attr.comp_vector = cmd->comp_vector;

        if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
                attr.flags = cmd->flags;

        cq = ib_dev->create_cq(ib_dev, &attr,
                               file->ucontext, uhw);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_file;
        }

        cq->device = ib_dev;
        cq->uobject = &obj->uobject;
        cq->comp_handler = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context = ev_file;
        atomic_set(&cq->usecnt, 0);

        obj->uobject.object = cq;
        ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
        if (ret)
                goto err_free;

        memset(&resp, 0, sizeof resp);
        resp.base.cq_handle = obj->uobject.id;
        resp.base.cqe = cq->cqe;

        resp.response_length = offsetof(typeof(resp), response_length) +
                sizeof(resp.response_length);

        ret = cb(file, obj, &resp, ucore, context);
        if (ret)
                goto err_cb;

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;

        up_write(&obj->uobject.mutex);

        return obj;

err_cb:
        idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
        ib_destroy_cq(cq);

err_file:
        if (ev_file)
                ib_uverbs_release_ucq(file, ev_file, obj);

err:
        put_uobj_write(&obj->uobject);

        return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
                                  struct ib_ucq_object *obj,
                                  struct ib_uverbs_ex_create_cq_resp *resp,
                                  struct ib_udata *ucore, void *context)
{
        if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
                return -EFAULT;

        return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq cmd;
        struct ib_uverbs_ex_create_cq cmd_ex;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata ucore;
        struct ib_udata uhw;
        struct ib_ucq_object *obj;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));

        INIT_UDATA(&uhw, buf + sizeof(cmd),
                   (unsigned long)cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));

        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
        cmd_ex.cqe = cmd.cqe;
        cmd_ex.comp_vector = cmd.comp_vector;
        cmd_ex.comp_channel = cmd.comp_channel;

        obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
                        offsetof(typeof(cmd_ex), comp_channel) +
                        sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
                        NULL);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
                                     struct ib_ucq_object *obj,
                                     struct ib_uverbs_ex_create_cq_resp *resp,
                                     struct ib_udata *ucore, void *context)
{
        if (ib_copy_to_udata(ucore, resp, resp->response_length))
                return -EFAULT;

        return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_create_cq_resp resp;
        struct ib_uverbs_ex_create_cq cmd;
        struct ib_ucq_object *obj;
        int err;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (err)
                return err;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        if (ucore->outlen < (offsetof(typeof(resp), response_length) +
                             sizeof(resp.response_length)))
                return -ENOSPC;

        obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
                        min(ucore->inlen, sizeof(cmd)),
                        ib_uverbs_ex_create_cq_cb, NULL);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_resize_cq cmd;
        struct ib_uverbs_resize_cq_resp resp;
        struct ib_udata udata;
        struct ib_cq *cq;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp.cqe))
                ret = -EFAULT;

out:
        put_cq_read(cq);

        return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id = wc->wr_id;
        tmp.status = wc->status;
        tmp.opcode = wc->opcode;
        tmp.vendor_err = wc->vendor_err;
        tmp.byte_len = wc->byte_len;
        tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
        tmp.qp_num = wc->qp->qp_num;
        tmp.src_qp = wc->src_qp;
        tmp.wc_flags = wc->wc_flags;
        tmp.pkey_index = wc->pkey_index;
        tmp.slid = wc->slid;
        tmp.sl = wc->sl;
        tmp.dlid_path_bits = wc->dlid_path_bits;
        tmp.port_num = wc->port_num;
        tmp.reserved = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}
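
/*
 * POLL_CQ: completions are polled one at a time and copied to the user
 * buffer immediately following the response header; resp.count ends up
 * holding the number of ib_uverbs_wc structures written there.
 */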
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
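
/*
 * Destroy a CQ: the uobject is marked dead under its write lock before
 * it is removed from the idr and the context's CQ list, and the counts
 * of completion/async events already delivered are returned to user
 * space.
 */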
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
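
/*
 * Create a QP. The pd_handle field is overloaded: for IB_QPT_XRC_TGT it
 * carries an XRCD handle and the QP is created via ib_create_qp(), while
 * all other QP types look up a PD and send/recv CQs (and optionally an
 * SRQ) and call the driver's create_qp() directly, so the generic ib_qp
 * fields and reference counts must be filled in here.
 */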
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
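
/*
 * Open a handle to an existing XRC QP: cmd.pd_handle again carries the
 * XRCD handle, and ib_open_qp() looks the QP up by cmd.qpn within that
 * domain rather than creating a new one.
 */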
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
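
/*
 * Query QP attributes via ib_query_qp() and flatten both ib_qp_attr and
 * ib_qp_init_attr (including the primary and alternate path address
 * vectors) into the fixed-layout uverbs response.
 */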
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
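
/*
 * Modify QP state. For a real QP (qp->real_qp == qp) the Ethernet L2
 * attributes are resolved first and the driver's modify_qp() is called
 * with udata; for a shared handle the change is routed through
 * ib_modify_qp() instead.
 */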
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
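
/*
 * Post send work requests. Each user WR is validated (UD QPs only accept
 * SEND/SEND_WITH_IMM and need an AH lookup), converted into a kernel
 * ib_send_wr with its scatter/gather list, chained, and handed to the
 * driver's post_send() in a single call; resp.bad_wr reports how far
 * posting got when the driver rejects a WR.
 */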
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			if (next->opcode != IB_WR_SEND &&
			    next->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				/* fall through to the shared break */
			case IB_WR_SEND:
				break;
			default:
				ret = -EINVAL;
				goto out_put;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
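
/*
 * Common helper for the post-recv paths: copies wr_count user work
 * requests (each wqe_size bytes, followed by the packed SGE array) into
 * a chained list of kernel ib_recv_wr, returning ERR_PTR() on any
 * validation or copy failure.
 */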
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
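
/*
 * Create an address handle: the user-supplied destination attributes are
 * copied into a kernel ib_ah_attr (vlan_id and dmac are simply zeroed
 * here) and the AH is created with ib_create_ah() and installed as a
 * uobject.
 */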
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	attr.vlan_id = 0;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
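
/*
 * Extended create-flow path: the variable-length ib_uverbs_flow_attr and
 * its trailing spec array are copied in, each spec is converted with
 * kern_spec_to_ib_spec(), and any mismatch between num_of_specs and the
 * declared attribute size is rejected before ib_create_flow() is called.
 */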
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

destroy_flow:
	ib_destroy_flow(flow_id);

err_free:
	kfree(flow_attr);

err_put:
	put_qp_read(qp);

err_uobj:
	put_uobj_write(uobj);

err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow *flow_id;
	struct ib_uobject *uobj;
	int ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
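
/*
 * Shared SRQ-creation helper used by both the basic and extended entry
 * points below; for IB_SRQT_XRC it additionally looks up and references
 * the XRC domain and its completion queue.
 */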
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_xsrq xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type = IB_SRQT_BASIC;
	xcmd.pd_handle = cmd.pd_handle;
	xcmd.max_wr = cmd.max_wr;
	xcmd.max_sge = cmd.max_sge;
	xcmd.srq_limit = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata udata;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr attr;
	struct ib_srq *srq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr = attr.max_wr;
	resp.max_sge = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject *uobj;
	struct ib_srq *srq;
	struct ib_uevent_object *obj;
	int ret = -EINVAL;
	struct ib_usrq_object *us;
	enum ib_srq_type srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
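
/*
 * Extended query-device: the response is built up incrementally, growing
 * resp.response_length only for the fields (odp_caps, timestamp_mask,
 * hca_core_clock) that actually fit in the caller's output buffer, so
 * older user libraries with shorter response structs keep working.
 */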
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp;
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	memset(&attr, 0, sizeof(attr));

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
	resp.comp_mask = 0;

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.odp_caps.reserved = 0;
#else
	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (err)
		return err;

	return 0;
}