i40iw_verbs.c

/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"

/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->ldev->pcidev->vendor;
	props->vendor_part_id = iwdev->ldev->pcidev->device;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp - iwdev->used_qps;
	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
	props->max_pd = iwdev->max_pd - iwdev->used_pds;
	props->max_sge_rd = I40IW_MAX_SGE_RD;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
	return 0;
}

/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
	props->lid = 1;
	if (netif_carrier_ok(iwdev->netdev))
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	return 0;
}

/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp;
	struct i40iw_ucontext *ucontext;

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return ERR_PTR(-EINVAL);
	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
		return ERR_PTR(-EINVAL);
	}

	memset(&uresp, 0, sizeof(uresp));
	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = req.userspace_ver;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		kfree(ucontext);
		return ERR_PTR(-EFAULT);
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return &ucontext->ibucontext;
}

/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct i40iw_ucontext *ucontext = to_ucontext(context);
	unsigned long flags;

	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->cq_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->qp_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

	kfree(ucontext);
	return 0;
}

/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset;
	u64 push_offset;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;

	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_private_data = ucontext;
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
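
/*
 * Mapping layout used above: a vm_pgoff of zero selects the doorbell page,
 * which is always mapped uncached and tagged with the ucontext; any non-zero
 * offset is redirected to one of the device push pages, where pages at an
 * even offset from push_offset are mapped write-combined and odd ones
 * uncached. Each call remaps exactly one PAGE_SIZE window of BAR 0.
 */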

/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}

/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct i40iw_pd *iwpd;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	struct i40iw_ucontext *ucontext;
	u32 pd_id = 0;
	int err;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return ERR_PTR(err);
	}

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	if (context) {
		ucontext = to_ucontext(context);
		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
	}

	i40iw_add_pdusecount(iwpd);
	return &iwpd->ibpd;

error:
	kfree(iwpd);
free_res:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return ERR_PTR(err);
}

/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 */
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
	return 0;
}

/**
 * i40iw_qp_roundup - return round up qp ring size
 * @wr_ring_size: ring size to round up
 */
static int i40iw_qp_roundup(u32 wr_ring_size)
{
	int scount = 1;

	if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
		wr_ring_size = I40IWQP_SW_MIN_WQSIZE;

	for (wr_ring_size--; scount <= 16; scount *= 2)
		wr_ring_size |= wr_ring_size >> scount;
	return ++wr_ring_size;
}
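
/*
 * The loop above is the standard round-up-to-a-power-of-two bit trick:
 * after the decrement, each shift-and-OR smears the highest set bit into
 * every lower bit position (shifts of 1, 2, 4, 8 and 16 cover a 32-bit
 * value), so the final increment produces the next power of two,
 * e.g. 100 -> 128 and 128 -> 128.
 */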

/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}

/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
}

/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}

/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}

/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}

/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u32 sq_size, rq_size;
	u8 sqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
	rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

	status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
	if (status)
		return -ENOMEM;

	sqdepth = sq_size << sqshift;
	rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;

	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];

	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sq_size;
	ukinfo->rq_size = rq_size;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}
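
/*
 * The single DMA buffer allocated above for a kernel-mode QP is laid out as
 * SQ WQEs (sqdepth * I40IW_QP_WQE_MIN_SIZE), then RQ WQEs
 * (rqdepth * I40IW_QP_WQE_MIN_SIZE), then the shadow area
 * (I40IW_SHADOW_AREA_SIZE * 8 bytes). The wrid_mem allocation is split the
 * same way: SQ work-request tracking entries first, then one u64 wrid per
 * RQ WQE.
 */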

/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext;
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	void *mem;
	enum i40iw_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.vsi = &iwdev->vsi;
	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->allocated_buffer = mem;
	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	qp = &iwqp->sc_qp;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -EINVAL;
		goto error;
	}

	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);

	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_udata\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;

		if (ibpd->uobject && ibpd->uobject->context) {
			iwqp->user_mode = 1;
			ucontext = to_ucontext(ibpd->uobject->context);

			if (req.user_wqe_buffers) {
				spin_lock_irqsave(
				    &ucontext->qp_reg_mem_list_lock, flags);
				iwqp->iwpbl = i40iw_get_pbl(
				    (unsigned long)req.user_wqe_buffers,
				    &ucontext->qp_reg_mem_list);
				spin_unlock_irqrestore(
				    &ucontext->qp_reg_mem_list_lock, flags);

				if (!iwqp->iwpbl) {
					err_code = -ENODATA;
					i40iw_pr_err("no pbl info\n");
					goto error;
				}
			}
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}

	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->fast_reg_en = true;
		iwarp_info->priv_mode_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	i40iw_add_devusecount(iwdev);
	if (ibpd->uobject && udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}
	init_completion(&iwqp->sq_drained);
	init_completion(&iwqp->rq_drained);

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	return ERR_PTR(err_code);
}

/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	return 0;
}

/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Modify QP fail");
}

/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	u32 err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
			err = -EINVAL;
			goto exit;
		}

		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				i40iw_terminate_del_timer(&iwqp->sc_qp);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;

		if (issue_modify_qp)
			iwqp->iwarp_state = info.next_iwarp_state;
		else
			info.next_iwarp_state = iwqp->iwarp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp)
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
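
/*
 * IB to iWARP state mapping implemented in the switch above: INIT/RTR move
 * an uninitialized QP to IDLE, RTS (which requires an attached cm_id) moves
 * it to RTS with a valid TCP context, SQD maps to CLOSING, SQE to TERMINATE,
 * and ERR/RESET force the QP to ERROR, requesting a TCP reset only while the
 * interface is up and the connection is not already closed or in TIME_WAIT.
 */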

/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * i40iw_cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}

/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	if (!ib_cq) {
		i40iw_pr_err("ib_cq == NULL\n");
		return 0;
	}

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	i40iw_cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	kfree(iwcq);
	i40iw_rem_devusecount(iwdev);
	return 0;
}

/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq;
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (entries > iwdev->max_cqe)
		return ERR_PTR(-EINVAL);

	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
	if (!iwcq)
		return ERR_PTR(-ENOMEM);

	memset(&info, 0, sizeof(info));

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		goto error;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	info.dev = dev;
	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	info.ceqe_mask = 0;
	if (attr->comp_vector < iwdev->ceqs_count)
		info.ceq_id = attr->comp_vector;
	info.ceq_id_valid = true;
	info.ceqe_mask = 1;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (context) {
		struct i40iw_ucontext *ucontext;
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		ucontext = to_ucontext(context);
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
			err_code = -EFAULT;
			goto cq_free_resources;
		}

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Create CQ fail");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (context) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	i40iw_add_devusecount(iwdev);
	return (struct ib_cq *)iwcq;

cq_destroy:
	i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
error:
	kfree(iwcq);
	return ERR_PTR(err_code);
}

/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}

/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
	i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;

	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
		i40iw_add_devusecount(iwdev);
	}
	return stag;
}
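
/*
 * STag layout produced above: the allocated MR index occupies the bits
 * selected by mr_stagmask (shifted by I40IW_CQPSQ_STAG_IDX_SHIFT), a random
 * driver key fills the remaining bits, and a random 8-bit consumer key is
 * added in. i40iw_free_stag() masks and shifts the same way to recover the
 * MR index when the stag is released.
 */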

/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}
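
/*
 * For a level-1 PBL (*pinfo is NULL) this simply steps to the next entry.
 * For a level-2 PBL it also notices when the current leaf chunk is exhausted
 * (idx reaches pinfo->cnt) and continues at the first entry of the next
 * leaf.
 */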

/**
 * i40iw_copy_user_pgaddrs - copy user page addresses to pble's os locally
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pble pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	int chunk_pages, entry, i;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct scatterlist *sg;
	u64 pg_addr = 0;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;

	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
		chunk_pages = sg_dma_len(sg) >> region->page_shift;
		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
		    !iwpbl->qp_mr.sq_page)
			iwpbl->qp_mr.sq_page = sg_page(sg);
		for (i = 0; i < chunk_pages; i++) {
			pg_addr = sg_dma_address(sg) +
				(i << region->page_shift);

			if ((entry + i) == 0)
				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
			else if (!(pg_addr & ~iwmr->page_msk))
				*pbl = cpu_to_le64(pg_addr);
			else
				continue;

			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
		}
	}
}

/**
 * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
 * @addr: virtual address
 * @iwmr: mr pointer for this memory registration
 */
static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
{
	struct vm_area_struct *vma;
	struct hstate *h;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma)) {
		h = hstate_vma(vma);
		if (huge_page_size(h) == 0x200000) {
			iwmr->page_size = huge_page_size(h);
			iwmr->page_msk = huge_page_mask(h);
		}
	}
}

/**
 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}
	return true;
}
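
/*
 * "Contiguous" here means every entry equals the first address plus a whole
 * number of pages, i.e. arr[i] == arr[0] + i * pg_size for all i. When that
 * holds, the hardware can be handed a single base address instead of a page
 * list.
 */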
  1245. /**
  1246. * i40iw_check_mr_contiguous - check if MR is physically contiguous
  1247. * @palloc: pbl allocation struct
  1248. * pg_size: page size
  1249. */
  1250. static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
  1251. {
  1252. struct i40iw_pble_level2 *lvl2 = &palloc->level2;
  1253. struct i40iw_pble_info *leaf = lvl2->leaf;
  1254. u64 *arr = NULL;
  1255. u64 *start_addr = NULL;
  1256. int i;
  1257. bool ret;
  1258. if (palloc->level == I40IW_LEVEL_1) {
  1259. arr = (u64 *)palloc->level1.addr;
  1260. ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
  1261. return ret;
  1262. }
  1263. start_addr = (u64 *)leaf->addr;
  1264. for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
  1265. arr = (u64 *)leaf->addr;
  1266. if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
  1267. return false;
  1268. ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
  1269. if (!ret)
  1270. return false;
  1271. }
  1272. return true;
  1273. }
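/*
 * Illustrative note: for a level-2 allocation the loop above additionally
 * requires each leaf to start exactly pg_size * PBLE_PER_PAGE bytes after
 * the previous one (a full leaf's worth of pages), so the whole MR is
 * contiguous only if every leaf is contiguous and the leaves line up
 * back to back.
 */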
/**
 * i40iw_setup_pbles - copy user page addresses to pbles
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag to use pbles
 */
  1280. static int i40iw_setup_pbles(struct i40iw_device *iwdev,
  1281. struct i40iw_mr *iwmr,
  1282. bool use_pbles)
  1283. {
  1284. struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
  1285. struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
  1286. struct i40iw_pble_info *pinfo;
  1287. u64 *pbl;
  1288. enum i40iw_status_code status;
  1289. enum i40iw_pble_level level = I40IW_LEVEL_1;
  1290. if (use_pbles) {
  1291. mutex_lock(&iwdev->pbl_mutex);
  1292. status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
  1293. mutex_unlock(&iwdev->pbl_mutex);
  1294. if (status)
  1295. return -ENOMEM;
  1296. iwpbl->pbl_allocated = true;
  1297. level = palloc->level;
  1298. pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
  1299. pbl = (u64 *)pinfo->addr;
  1300. } else {
  1301. pbl = iwmr->pgaddrmem;
  1302. }
  1303. i40iw_copy_user_pgaddrs(iwmr, pbl, level);
  1304. if (use_pbles)
  1305. iwmr->pgaddrmem[0] = *pbl;
  1306. return 0;
  1307. }
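/*
 * Illustrative note: callers pick use_pbles from the registration size, e.g.
 * (iwmr->page_cnt != 1) for plain MRs, (sq_pages + rq_pages > 2) for QPs and
 * (cq_pages > 1) for CQs (see i40iw_reg_user_mr()).  With use_pbles == false
 * the page addresses land directly in iwmr->pgaddrmem[]; otherwise they are
 * written into HW PBLE memory, either a single level-1 array or the leaves
 * of a level-2 tree, as returned by i40iw_get_pble().
 */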
  1308. /**
  1309. * i40iw_handle_q_mem - handle memory for qp and cq
  1310. * @iwdev: iwarp device
  1311. * @req: information for q memory management
  1312. * @iwpbl: pble struct
  1313. * @use_pbles: flag to use pble
  1314. */
  1315. static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
  1316. struct i40iw_mem_reg_req *req,
  1317. struct i40iw_pbl *iwpbl,
  1318. bool use_pbles)
  1319. {
  1320. struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
  1321. struct i40iw_mr *iwmr = iwpbl->iwmr;
  1322. struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
  1323. struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
  1324. struct i40iw_hmc_pble *hmc_p;
  1325. u64 *arr = iwmr->pgaddrmem;
  1326. u32 pg_size;
  1327. int err;
  1328. int total;
  1329. bool ret = true;
  1330. total = req->sq_pages + req->rq_pages + req->cq_pages;
  1331. pg_size = iwmr->page_size;
  1332. err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
  1333. if (err)
  1334. return err;
  1335. if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
  1336. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1337. iwpbl->pbl_allocated = false;
  1338. return -ENOMEM;
  1339. }
  1340. if (use_pbles)
  1341. arr = (u64 *)palloc->level1.addr;
  1342. if (iwmr->type == IW_MEMREG_TYPE_QP) {
  1343. hmc_p = &qpmr->sq_pbl;
  1344. qpmr->shadow = (dma_addr_t)arr[total];
  1345. if (use_pbles) {
  1346. ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
  1347. if (ret)
  1348. ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
  1349. }
  1350. if (!ret) {
  1351. hmc_p->idx = palloc->level1.idx;
  1352. hmc_p = &qpmr->rq_pbl;
  1353. hmc_p->idx = palloc->level1.idx + req->sq_pages;
  1354. } else {
  1355. hmc_p->addr = arr[0];
  1356. hmc_p = &qpmr->rq_pbl;
  1357. hmc_p->addr = arr[req->sq_pages];
  1358. }
  1359. } else { /* CQ */
  1360. hmc_p = &cqmr->cq_pbl;
  1361. cqmr->shadow = (dma_addr_t)arr[total];
  1362. if (use_pbles)
  1363. ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
  1364. if (!ret)
  1365. hmc_p->idx = palloc->level1.idx;
  1366. else
  1367. hmc_p->addr = arr[0];
  1368. }
  1369. if (use_pbles && ret) {
  1370. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1371. iwpbl->pbl_allocated = false;
  1372. }
  1373. return err;
  1374. }
  1375. /**
  1376. * i40iw_hw_alloc_stag - cqp command to allocate stag
  1377. * @iwdev: iwarp device
  1378. * @iwmr: iwarp mr pointer
  1379. */
  1380. static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
  1381. {
  1382. struct i40iw_allocate_stag_info *info;
  1383. struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
  1384. enum i40iw_status_code status;
  1385. int err = 0;
  1386. struct i40iw_cqp_request *cqp_request;
  1387. struct cqp_commands_info *cqp_info;
  1388. cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
  1389. if (!cqp_request)
  1390. return -ENOMEM;
  1391. cqp_info = &cqp_request->info;
  1392. info = &cqp_info->in.u.alloc_stag.info;
  1393. memset(info, 0, sizeof(*info));
  1394. info->page_size = PAGE_SIZE;
  1395. info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
  1396. info->pd_id = iwpd->sc_pd.pd_id;
  1397. info->total_len = iwmr->length;
  1398. info->remote_access = true;
  1399. cqp_info->cqp_cmd = OP_ALLOC_STAG;
  1400. cqp_info->post_sq = 1;
  1401. cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
  1402. cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
  1403. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  1404. if (status) {
  1405. err = -ENOMEM;
  1406. i40iw_pr_err("CQP-OP MR Reg fail");
  1407. }
  1408. return err;
  1409. }
/**
 * i40iw_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory type for stag registration
 * @max_num_sg: max number of pages
 */
  1416. static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
  1417. enum ib_mr_type mr_type,
  1418. u32 max_num_sg)
  1419. {
  1420. struct i40iw_pd *iwpd = to_iwpd(pd);
  1421. struct i40iw_device *iwdev = to_iwdev(pd->device);
  1422. struct i40iw_pble_alloc *palloc;
  1423. struct i40iw_pbl *iwpbl;
  1424. struct i40iw_mr *iwmr;
  1425. enum i40iw_status_code status;
  1426. u32 stag;
  1427. int err_code = -ENOMEM;
  1428. iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
  1429. if (!iwmr)
  1430. return ERR_PTR(-ENOMEM);
  1431. stag = i40iw_create_stag(iwdev);
  1432. if (!stag) {
  1433. err_code = -EOVERFLOW;
  1434. goto err;
  1435. }
  1436. iwmr->stag = stag;
  1437. iwmr->ibmr.rkey = stag;
  1438. iwmr->ibmr.lkey = stag;
  1439. iwmr->ibmr.pd = pd;
  1440. iwmr->ibmr.device = pd->device;
  1441. iwpbl = &iwmr->iwpbl;
  1442. iwpbl->iwmr = iwmr;
  1443. iwmr->type = IW_MEMREG_TYPE_MEM;
  1444. palloc = &iwpbl->pble_alloc;
  1445. iwmr->page_cnt = max_num_sg;
  1446. mutex_lock(&iwdev->pbl_mutex);
  1447. status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
  1448. mutex_unlock(&iwdev->pbl_mutex);
  1449. if (status)
  1450. goto err1;
  1451. if (palloc->level != I40IW_LEVEL_1)
  1452. goto err2;
  1453. err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
  1454. if (err_code)
  1455. goto err2;
  1456. iwpbl->pbl_allocated = true;
  1457. i40iw_add_pdusecount(iwpd);
  1458. return &iwmr->ibmr;
  1459. err2:
  1460. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1461. err1:
  1462. i40iw_free_stag(iwdev, stag);
  1463. err:
  1464. kfree(iwmr);
  1465. return ERR_PTR(err_code);
  1466. }
/**
 * i40iw_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
  1472. static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
  1473. {
  1474. struct i40iw_mr *iwmr = to_iwmr(ibmr);
  1475. struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
  1476. struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
  1477. u64 *pbl;
  1478. if (unlikely(iwmr->npages == iwmr->page_cnt))
  1479. return -ENOMEM;
  1480. pbl = (u64 *)palloc->level1.addr;
  1481. pbl[iwmr->npages++] = cpu_to_le64(addr);
  1482. return 0;
  1483. }
/**
 * i40iw_map_mr_sg - map sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list for fmr
 * @sg_nents: number of sg entries
 * @sg_offset: pointer to offset into the first sg entry
 */
  1490. static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
  1491. int sg_nents, unsigned int *sg_offset)
  1492. {
  1493. struct i40iw_mr *iwmr = to_iwmr(ibmr);
  1494. iwmr->npages = 0;
  1495. return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
  1496. }
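/*
 * Usage sketch (illustrative; pd, qp, sgl and nents are hypothetical ULP
 * names, error handling omitted): a kernel consumer drives this
 * fast-registration path through the core verbs only, e.g.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	struct ib_reg_wr rwr = {};
 *	struct ib_send_wr *bad_wr;
 *
 *	ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	rwr.wr.opcode = IB_WR_REG_MR;
 *	rwr.wr.send_flags = IB_SEND_SIGNALED;
 *	rwr.mr = mr;
 *	rwr.key = mr->rkey;
 *	rwr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ib_post_send(qp, &rwr.wr, &bad_wr);
 *
 * which ends up in i40iw_alloc_mr(), i40iw_map_mr_sg() and the IB_WR_REG_MR
 * case of i40iw_post_send() below.
 */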
  1497. /**
  1498. * i40iw_drain_sq - drain the send queue
  1499. * @ibqp: ib qp pointer
  1500. */
  1501. static void i40iw_drain_sq(struct ib_qp *ibqp)
  1502. {
  1503. struct i40iw_qp *iwqp = to_iwqp(ibqp);
  1504. struct i40iw_sc_qp *qp = &iwqp->sc_qp;
  1505. if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
  1506. wait_for_completion(&iwqp->sq_drained);
  1507. }
  1508. /**
  1509. * i40iw_drain_rq - drain the receive queue
  1510. * @ibqp: ib qp pointer
  1511. */
  1512. static void i40iw_drain_rq(struct ib_qp *ibqp)
  1513. {
  1514. struct i40iw_qp *iwqp = to_iwqp(ibqp);
  1515. struct i40iw_sc_qp *qp = &iwqp->sc_qp;
  1516. if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
  1517. wait_for_completion(&iwqp->rq_drained);
  1518. }
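/*
 * Illustrative note: these two callbacks back the core ib_drain_sq() /
 * ib_drain_rq() helpers, so a ULP can simply call ib_drain_qp(qp); the
 * matching complete() calls are made from i40iw_poll_cq() once the rings
 * no longer report outstanding work (see the iwarp_state check there).
 */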
  1519. /**
  1520. * i40iw_hwreg_mr - send cqp command for memory registration
  1521. * @iwdev: iwarp device
  1522. * @iwmr: iwarp mr pointer
  1523. * @access: access for MR
  1524. */
  1525. static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
  1526. struct i40iw_mr *iwmr,
  1527. u16 access)
  1528. {
  1529. struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
  1530. struct i40iw_reg_ns_stag_info *stag_info;
  1531. struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
  1532. struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
  1533. enum i40iw_status_code status;
  1534. int err = 0;
  1535. struct i40iw_cqp_request *cqp_request;
  1536. struct cqp_commands_info *cqp_info;
  1537. cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
  1538. if (!cqp_request)
  1539. return -ENOMEM;
  1540. cqp_info = &cqp_request->info;
  1541. stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
  1542. memset(stag_info, 0, sizeof(*stag_info));
  1543. stag_info->va = (void *)(unsigned long)iwpbl->user_base;
  1544. stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
  1545. stag_info->stag_key = (u8)iwmr->stag;
  1546. stag_info->total_len = iwmr->length;
  1547. stag_info->access_rights = access;
  1548. stag_info->pd_id = iwpd->sc_pd.pd_id;
  1549. stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
  1550. stag_info->page_size = iwmr->page_size;
  1551. if (iwpbl->pbl_allocated) {
  1552. if (palloc->level == I40IW_LEVEL_1) {
  1553. stag_info->first_pm_pbl_index = palloc->level1.idx;
  1554. stag_info->chunk_size = 1;
  1555. } else {
  1556. stag_info->first_pm_pbl_index = palloc->level2.root.idx;
  1557. stag_info->chunk_size = 3;
  1558. }
  1559. } else {
  1560. stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
  1561. }
  1562. cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
  1563. cqp_info->post_sq = 1;
  1564. cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
  1565. cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
  1566. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  1567. if (status) {
  1568. err = -ENOMEM;
  1569. i40iw_pr_err("CQP-OP MR Reg fail");
  1570. }
  1571. return err;
  1572. }
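/*
 * Illustrative note: an i40iw STag packs the consumer key in its low byte
 * (stag_key = (u8)iwmr->stag) and the HW index in the bits above it
 * (stag >> I40IW_CQPSQ_STAG_IDX_SHIFT).  chunk_size selects how the page
 * list is described: 0 (the memset default) when a single physical address
 * is programmed via reg_addr_pa, 1 for a level-1 PBL and 3 for the root of
 * a level-2 PBL.
 */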
  1573. /**
  1574. * i40iw_reg_user_mr - Register a user memory region
  1575. * @pd: ptr of pd
  1576. * @start: virtual start address
  1577. * @length: length of mr
  1578. * @virt: virtual address
  1579. * @acc: access of mr
  1580. * @udata: user data
  1581. */
  1582. static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
  1583. u64 start,
  1584. u64 length,
  1585. u64 virt,
  1586. int acc,
  1587. struct ib_udata *udata)
  1588. {
  1589. struct i40iw_pd *iwpd = to_iwpd(pd);
  1590. struct i40iw_device *iwdev = to_iwdev(pd->device);
  1591. struct i40iw_ucontext *ucontext;
  1592. struct i40iw_pble_alloc *palloc;
  1593. struct i40iw_pbl *iwpbl;
  1594. struct i40iw_mr *iwmr;
  1595. struct ib_umem *region;
  1596. struct i40iw_mem_reg_req req;
  1597. u64 pbl_depth = 0;
  1598. u32 stag = 0;
  1599. u16 access;
  1600. u64 region_length;
  1601. bool use_pbles = false;
  1602. unsigned long flags;
  1603. int err = -ENOSYS;
  1604. int ret;
  1605. int pg_shift;
  1606. if (iwdev->closing)
  1607. return ERR_PTR(-ENODEV);
  1608. if (length > I40IW_MAX_MR_SIZE)
  1609. return ERR_PTR(-EINVAL);
  1610. region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
  1611. if (IS_ERR(region))
  1612. return (struct ib_mr *)region;
  1613. if (ib_copy_from_udata(&req, udata, sizeof(req))) {
  1614. ib_umem_release(region);
  1615. return ERR_PTR(-EFAULT);
  1616. }
  1617. iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
  1618. if (!iwmr) {
  1619. ib_umem_release(region);
  1620. return ERR_PTR(-ENOMEM);
  1621. }
  1622. iwpbl = &iwmr->iwpbl;
  1623. iwpbl->iwmr = iwmr;
  1624. iwmr->region = region;
  1625. iwmr->ibmr.pd = pd;
  1626. iwmr->ibmr.device = pd->device;
  1627. ucontext = to_ucontext(pd->uobject->context);
  1628. iwmr->page_size = PAGE_SIZE;
  1629. iwmr->page_msk = PAGE_MASK;
  1630. if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
  1631. i40iw_set_hugetlb_values(start, iwmr);
  1632. region_length = region->length + (start & (iwmr->page_size - 1));
  1633. pg_shift = ffs(iwmr->page_size) - 1;
  1634. pbl_depth = region_length >> pg_shift;
  1635. pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
  1636. iwmr->length = region->length;
  1637. iwpbl->user_base = virt;
  1638. palloc = &iwpbl->pble_alloc;
  1639. iwmr->type = req.reg_type;
  1640. iwmr->page_cnt = (u32)pbl_depth;
  1641. switch (req.reg_type) {
  1642. case IW_MEMREG_TYPE_QP:
  1643. use_pbles = ((req.sq_pages + req.rq_pages) > 2);
  1644. err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
  1645. if (err)
  1646. goto error;
  1647. spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
  1648. list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
  1649. spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
  1650. break;
  1651. case IW_MEMREG_TYPE_CQ:
  1652. use_pbles = (req.cq_pages > 1);
  1653. err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
  1654. if (err)
  1655. goto error;
  1656. spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
  1657. list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
  1658. spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
  1659. break;
  1660. case IW_MEMREG_TYPE_MEM:
  1661. use_pbles = (iwmr->page_cnt != 1);
  1662. access = I40IW_ACCESS_FLAGS_LOCALREAD;
  1663. err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
  1664. if (err)
  1665. goto error;
  1666. if (use_pbles) {
  1667. ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
  1668. if (ret) {
  1669. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1670. iwpbl->pbl_allocated = false;
  1671. }
  1672. }
  1673. access |= i40iw_get_user_access(acc);
  1674. stag = i40iw_create_stag(iwdev);
  1675. if (!stag) {
  1676. err = -ENOMEM;
  1677. goto error;
  1678. }
  1679. iwmr->stag = stag;
  1680. iwmr->ibmr.rkey = stag;
  1681. iwmr->ibmr.lkey = stag;
  1682. err = i40iw_hwreg_mr(iwdev, iwmr, access);
  1683. if (err) {
  1684. i40iw_free_stag(iwdev, stag);
  1685. goto error;
  1686. }
  1687. break;
  1688. default:
  1689. goto error;
  1690. }
  1691. iwmr->type = req.reg_type;
  1692. if (req.reg_type == IW_MEMREG_TYPE_MEM)
  1693. i40iw_add_pdusecount(iwpd);
  1694. return &iwmr->ibmr;
  1695. error:
  1696. if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
  1697. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1698. ib_umem_release(region);
  1699. kfree(iwmr);
  1700. return ERR_PTR(err);
  1701. }
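/*
 * Usage sketch (illustrative; pd, buf and len are hypothetical): the
 * userspace counterpart that reaches this verb is a plain libibverbs
 * registration, e.g.
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * with the provider library supplying the i40iw_mem_reg_req private data
 * (reg_type IW_MEMREG_TYPE_MEM) that is copied from udata above.
 */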
  1702. /**
  1703. * i40iw_reg_phys_mr - register kernel physical memory
  1704. * @pd: ibpd pointer
  1705. * @addr: physical address of memory to register
  1706. * @size: size of memory to register
  1707. * @acc: Access rights
  1708. * @iova_start: start of virtual address for physical buffers
  1709. */
  1710. struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
  1711. u64 addr,
  1712. u64 size,
  1713. int acc,
  1714. u64 *iova_start)
  1715. {
  1716. struct i40iw_pd *iwpd = to_iwpd(pd);
  1717. struct i40iw_device *iwdev = to_iwdev(pd->device);
  1718. struct i40iw_pbl *iwpbl;
  1719. struct i40iw_mr *iwmr;
  1720. enum i40iw_status_code status;
  1721. u32 stag;
  1722. u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
  1723. int ret;
  1724. iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
  1725. if (!iwmr)
  1726. return ERR_PTR(-ENOMEM);
  1727. iwmr->ibmr.pd = pd;
  1728. iwmr->ibmr.device = pd->device;
  1729. iwpbl = &iwmr->iwpbl;
  1730. iwpbl->iwmr = iwmr;
  1731. iwmr->type = IW_MEMREG_TYPE_MEM;
  1732. iwpbl->user_base = *iova_start;
  1733. stag = i40iw_create_stag(iwdev);
  1734. if (!stag) {
  1735. ret = -EOVERFLOW;
  1736. goto err;
  1737. }
  1738. access |= i40iw_get_user_access(acc);
  1739. iwmr->stag = stag;
  1740. iwmr->ibmr.rkey = stag;
  1741. iwmr->ibmr.lkey = stag;
  1742. iwmr->page_cnt = 1;
  1743. iwmr->pgaddrmem[0] = addr;
  1744. iwmr->length = size;
  1745. status = i40iw_hwreg_mr(iwdev, iwmr, access);
  1746. if (status) {
  1747. i40iw_free_stag(iwdev, stag);
  1748. ret = -ENOMEM;
  1749. goto err;
  1750. }
  1751. i40iw_add_pdusecount(iwpd);
  1752. return &iwmr->ibmr;
  1753. err:
  1754. kfree(iwmr);
  1755. return ERR_PTR(ret);
  1756. }
  1757. /**
  1758. * i40iw_get_dma_mr - register physical mem
  1759. * @pd: ptr of pd
  1760. * @acc: access for memory
  1761. */
  1762. static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
  1763. {
  1764. u64 kva = 0;
  1765. return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
  1766. }
/**
 * i40iw_del_memlist - delete pbl list entries for CQ/QP
 * @iwmr: iwmr for IB's user page addresses
 * @ucontext: ptr to user context
 */
  1772. static void i40iw_del_memlist(struct i40iw_mr *iwmr,
  1773. struct i40iw_ucontext *ucontext)
  1774. {
  1775. struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
  1776. unsigned long flags;
  1777. switch (iwmr->type) {
  1778. case IW_MEMREG_TYPE_CQ:
  1779. spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
  1780. if (!list_empty(&ucontext->cq_reg_mem_list))
  1781. list_del(&iwpbl->list);
  1782. spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
  1783. break;
  1784. case IW_MEMREG_TYPE_QP:
  1785. spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
  1786. if (!list_empty(&ucontext->qp_reg_mem_list))
  1787. list_del(&iwpbl->list);
  1788. spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
  1789. break;
  1790. default:
  1791. break;
  1792. }
  1793. }
  1794. /**
  1795. * i40iw_dereg_mr - deregister mr
  1796. * @ib_mr: mr ptr for dereg
  1797. */
  1798. static int i40iw_dereg_mr(struct ib_mr *ib_mr)
  1799. {
  1800. struct ib_pd *ibpd = ib_mr->pd;
  1801. struct i40iw_pd *iwpd = to_iwpd(ibpd);
  1802. struct i40iw_mr *iwmr = to_iwmr(ib_mr);
  1803. struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
  1804. enum i40iw_status_code status;
  1805. struct i40iw_dealloc_stag_info *info;
  1806. struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
  1807. struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
  1808. struct i40iw_cqp_request *cqp_request;
  1809. struct cqp_commands_info *cqp_info;
  1810. u32 stag_idx;
  1811. if (iwmr->region)
  1812. ib_umem_release(iwmr->region);
  1813. if (iwmr->type != IW_MEMREG_TYPE_MEM) {
  1814. if (ibpd->uobject) {
  1815. struct i40iw_ucontext *ucontext;
  1816. ucontext = to_ucontext(ibpd->uobject->context);
  1817. i40iw_del_memlist(iwmr, ucontext);
  1818. }
  1819. if (iwpbl->pbl_allocated)
  1820. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1821. kfree(iwmr);
  1822. return 0;
  1823. }
  1824. cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
  1825. if (!cqp_request)
  1826. return -ENOMEM;
  1827. cqp_info = &cqp_request->info;
  1828. info = &cqp_info->in.u.dealloc_stag.info;
  1829. memset(info, 0, sizeof(*info));
  1830. info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
  1831. info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
  1832. stag_idx = info->stag_idx;
  1833. info->mr = true;
  1834. if (iwpbl->pbl_allocated)
  1835. info->dealloc_pbl = true;
  1836. cqp_info->cqp_cmd = OP_DEALLOC_STAG;
  1837. cqp_info->post_sq = 1;
  1838. cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
  1839. cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
  1840. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  1841. if (status)
  1842. i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
  1843. i40iw_rem_pdusecount(iwpd, iwdev);
  1844. i40iw_free_stag(iwdev, iwmr->stag);
  1845. if (iwpbl->pbl_allocated)
  1846. i40iw_free_pble(iwdev->pble_rsrc, palloc);
  1847. kfree(iwmr);
  1848. return 0;
  1849. }
  1850. /**
  1851. * i40iw_show_rev
  1852. */
  1853. static ssize_t i40iw_show_rev(struct device *dev,
  1854. struct device_attribute *attr, char *buf)
  1855. {
  1856. struct i40iw_ib_device *iwibdev = container_of(dev,
  1857. struct i40iw_ib_device,
  1858. ibdev.dev);
  1859. u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
  1860. return sprintf(buf, "%x\n", hw_rev);
  1861. }
  1862. /**
  1863. * i40iw_show_hca
  1864. */
  1865. static ssize_t i40iw_show_hca(struct device *dev,
  1866. struct device_attribute *attr, char *buf)
  1867. {
  1868. return sprintf(buf, "I40IW\n");
  1869. }
  1870. /**
  1871. * i40iw_show_board
  1872. */
  1873. static ssize_t i40iw_show_board(struct device *dev,
  1874. struct device_attribute *attr,
  1875. char *buf)
  1876. {
  1877. return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
  1878. }
  1879. static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
  1880. static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
  1881. static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
  1882. static struct device_attribute *i40iw_dev_attributes[] = {
  1883. &dev_attr_hw_rev,
  1884. &dev_attr_hca_type,
  1885. &dev_attr_board_id
  1886. };
/**
 * i40iw_copy_sg_list - copy sg list for qp
 * @sg_list: destination i40iw sg list
 * @sgl: source ib sg list
 * @num_sges: count of sg entries
 */
  1893. static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
  1894. {
  1895. unsigned int i;
  1896. for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
  1897. sg_list[i].tag_off = sgl[i].addr;
  1898. sg_list[i].len = sgl[i].length;
  1899. sg_list[i].stag = sgl[i].lkey;
  1900. }
  1901. }
  1902. /**
  1903. * i40iw_post_send - kernel application wr
  1904. * @ibqp: qp ptr for wr
  1905. * @ib_wr: work request ptr
  1906. * @bad_wr: return of bad wr if err
  1907. */
  1908. static int i40iw_post_send(struct ib_qp *ibqp,
  1909. struct ib_send_wr *ib_wr,
  1910. struct ib_send_wr **bad_wr)
  1911. {
  1912. struct i40iw_qp *iwqp;
  1913. struct i40iw_qp_uk *ukqp;
  1914. struct i40iw_post_sq_info info;
  1915. enum i40iw_status_code ret;
  1916. int err = 0;
  1917. unsigned long flags;
  1918. bool inv_stag;
  1919. iwqp = (struct i40iw_qp *)ibqp;
  1920. ukqp = &iwqp->sc_qp.qp_uk;
  1921. spin_lock_irqsave(&iwqp->lock, flags);
  1922. while (ib_wr) {
  1923. inv_stag = false;
  1924. memset(&info, 0, sizeof(info));
  1925. info.wr_id = (u64)(ib_wr->wr_id);
  1926. if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
  1927. info.signaled = true;
  1928. if (ib_wr->send_flags & IB_SEND_FENCE)
  1929. info.read_fence = true;
  1930. switch (ib_wr->opcode) {
  1931. case IB_WR_SEND:
  1932. /* fall-through */
  1933. case IB_WR_SEND_WITH_INV:
  1934. if (ib_wr->opcode == IB_WR_SEND) {
  1935. if (ib_wr->send_flags & IB_SEND_SOLICITED)
  1936. info.op_type = I40IW_OP_TYPE_SEND_SOL;
  1937. else
  1938. info.op_type = I40IW_OP_TYPE_SEND;
  1939. } else {
  1940. if (ib_wr->send_flags & IB_SEND_SOLICITED)
  1941. info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
  1942. else
  1943. info.op_type = I40IW_OP_TYPE_SEND_INV;
  1944. }
  1945. if (ib_wr->send_flags & IB_SEND_INLINE) {
  1946. info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
  1947. info.op.inline_send.len = ib_wr->sg_list[0].length;
  1948. ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
  1949. } else {
  1950. info.op.send.num_sges = ib_wr->num_sge;
  1951. info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
  1952. ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
  1953. }
  1954. if (ret) {
  1955. if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
  1956. err = -ENOMEM;
  1957. else
  1958. err = -EINVAL;
  1959. }
  1960. break;
  1961. case IB_WR_RDMA_WRITE:
  1962. info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
  1963. if (ib_wr->send_flags & IB_SEND_INLINE) {
  1964. info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
  1965. info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
  1966. info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
  1967. info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
  1968. info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
  1969. ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
  1970. } else {
  1971. info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
  1972. info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
  1973. info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
  1974. info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
  1975. info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
  1976. ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
  1977. }
  1978. if (ret) {
  1979. if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
  1980. err = -ENOMEM;
  1981. else
  1982. err = -EINVAL;
  1983. }
  1984. break;
  1985. case IB_WR_RDMA_READ_WITH_INV:
  1986. inv_stag = true;
  1987. /* fall-through*/
  1988. case IB_WR_RDMA_READ:
  1989. if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
  1990. err = -EINVAL;
  1991. break;
  1992. }
  1993. info.op_type = I40IW_OP_TYPE_RDMA_READ;
  1994. info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
  1995. info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
  1996. info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
  1997. info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
  1998. info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
  1999. info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
  2000. ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
  2001. if (ret) {
  2002. if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
  2003. err = -ENOMEM;
  2004. else
  2005. err = -EINVAL;
  2006. }
  2007. break;
  2008. case IB_WR_LOCAL_INV:
  2009. info.op_type = I40IW_OP_TYPE_INV_STAG;
  2010. info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
  2011. ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
  2012. if (ret)
  2013. err = -ENOMEM;
  2014. break;
  2015. case IB_WR_REG_MR:
  2016. {
  2017. struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
  2018. int flags = reg_wr(ib_wr)->access;
  2019. struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
  2020. struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
  2021. struct i40iw_fast_reg_stag_info info;
  2022. memset(&info, 0, sizeof(info));
  2023. info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
  2024. info.access_rights |= i40iw_get_user_access(flags);
  2025. info.stag_key = reg_wr(ib_wr)->key & 0xff;
  2026. info.stag_idx = reg_wr(ib_wr)->key >> 8;
  2027. info.page_size = reg_wr(ib_wr)->mr->page_size;
  2028. info.wr_id = ib_wr->wr_id;
  2029. info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
  2030. info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
  2031. info.total_len = iwmr->ibmr.length;
  2032. info.reg_addr_pa = *(u64 *)palloc->level1.addr;
  2033. info.first_pm_pbl_index = palloc->level1.idx;
  2034. info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
  2035. info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
  2036. if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
  2037. info.chunk_size = 1;
  2038. ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
  2039. if (ret)
  2040. err = -ENOMEM;
  2041. break;
  2042. }
  2043. default:
  2044. err = -EINVAL;
  2045. i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
  2046. ib_wr->opcode);
  2047. break;
  2048. }
  2049. if (err)
  2050. break;
  2051. ib_wr = ib_wr->next;
  2052. }
  2053. if (err)
  2054. *bad_wr = ib_wr;
  2055. else
  2056. ukqp->ops.iw_qp_post_wr(ukqp);
  2057. spin_unlock_irqrestore(&iwqp->lock, flags);
  2058. return err;
  2059. }
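/*
 * Usage sketch (illustrative; dma_addr, len, lkey and qp are hypothetical):
 * a kernel ULP posts through the core API, which lands here, e.g.
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_send_wr wr = {}, *bad_wr;
 *
 *	wr.wr_id = 1;
 *	wr.opcode = IB_WR_SEND;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	ib_post_send(qp, &wr, &bad_wr);
 *
 * Small payloads may instead set IB_SEND_INLINE, which takes the
 * inline_send branch above.
 */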
  2060. /**
  2061. * i40iw_post_recv - post receive wr for kernel application
  2062. * @ibqp: ib qp pointer
  2063. * @ib_wr: work request for receive
  2064. * @bad_wr: bad wr caused an error
  2065. */
  2066. static int i40iw_post_recv(struct ib_qp *ibqp,
  2067. struct ib_recv_wr *ib_wr,
  2068. struct ib_recv_wr **bad_wr)
  2069. {
  2070. struct i40iw_qp *iwqp;
  2071. struct i40iw_qp_uk *ukqp;
  2072. struct i40iw_post_rq_info post_recv;
  2073. struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
  2074. enum i40iw_status_code ret = 0;
  2075. unsigned long flags;
  2076. int err = 0;
  2077. iwqp = (struct i40iw_qp *)ibqp;
  2078. ukqp = &iwqp->sc_qp.qp_uk;
  2079. memset(&post_recv, 0, sizeof(post_recv));
  2080. spin_lock_irqsave(&iwqp->lock, flags);
  2081. while (ib_wr) {
  2082. post_recv.num_sges = ib_wr->num_sge;
  2083. post_recv.wr_id = ib_wr->wr_id;
  2084. i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
  2085. post_recv.sg_list = sg_list;
  2086. ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
  2087. if (ret) {
  2088. i40iw_pr_err(" post_recv err %d\n", ret);
  2089. if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
  2090. err = -ENOMEM;
  2091. else
  2092. err = -EINVAL;
  2093. *bad_wr = ib_wr;
  2094. goto out;
  2095. }
  2096. ib_wr = ib_wr->next;
  2097. }
  2098. out:
  2099. spin_unlock_irqrestore(&iwqp->lock, flags);
  2100. return err;
  2101. }
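/*
 * Usage sketch (illustrative; dma_addr, len, lkey and qp are hypothetical):
 * receive buffers are posted the same way through the core API, e.g.
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_recv_wr wr = { .wr_id = 2, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	ib_post_recv(qp, &wr, &bad_wr);
 *
 * num_sge is capped at I40IW_MAX_WQ_FRAGMENT_COUNT by i40iw_copy_sg_list().
 */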
/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: array of work completions to fill in
 */
  2108. static int i40iw_poll_cq(struct ib_cq *ibcq,
  2109. int num_entries,
  2110. struct ib_wc *entry)
  2111. {
  2112. struct i40iw_cq *iwcq;
  2113. int cqe_count = 0;
  2114. struct i40iw_cq_poll_info cq_poll_info;
  2115. enum i40iw_status_code ret;
  2116. struct i40iw_cq_uk *ukcq;
  2117. struct i40iw_sc_qp *qp;
  2118. struct i40iw_qp *iwqp;
  2119. unsigned long flags;
  2120. iwcq = (struct i40iw_cq *)ibcq;
  2121. ukcq = &iwcq->sc_cq.cq_uk;
  2122. spin_lock_irqsave(&iwcq->lock, flags);
  2123. while (cqe_count < num_entries) {
  2124. ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
  2125. if (ret == I40IW_ERR_QUEUE_EMPTY) {
  2126. break;
  2127. } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
  2128. continue;
  2129. } else if (ret) {
  2130. if (!cqe_count)
  2131. cqe_count = -1;
  2132. break;
  2133. }
  2134. entry->wc_flags = 0;
  2135. entry->wr_id = cq_poll_info.wr_id;
  2136. if (cq_poll_info.error) {
  2137. entry->status = IB_WC_WR_FLUSH_ERR;
  2138. entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
  2139. } else {
  2140. entry->status = IB_WC_SUCCESS;
  2141. }
  2142. switch (cq_poll_info.op_type) {
  2143. case I40IW_OP_TYPE_RDMA_WRITE:
  2144. entry->opcode = IB_WC_RDMA_WRITE;
  2145. break;
  2146. case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
  2147. case I40IW_OP_TYPE_RDMA_READ:
  2148. entry->opcode = IB_WC_RDMA_READ;
  2149. break;
  2150. case I40IW_OP_TYPE_SEND_SOL:
  2151. case I40IW_OP_TYPE_SEND_SOL_INV:
  2152. case I40IW_OP_TYPE_SEND_INV:
  2153. case I40IW_OP_TYPE_SEND:
  2154. entry->opcode = IB_WC_SEND;
  2155. break;
  2156. case I40IW_OP_TYPE_REC:
  2157. entry->opcode = IB_WC_RECV;
  2158. break;
  2159. default:
  2160. entry->opcode = IB_WC_RECV;
  2161. break;
  2162. }
  2163. entry->ex.imm_data = 0;
  2164. qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
  2165. entry->qp = (struct ib_qp *)qp->back_qp;
  2166. entry->src_qp = cq_poll_info.qp_id;
  2167. iwqp = (struct i40iw_qp *)qp->back_qp;
  2168. if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
  2169. if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
  2170. complete(&iwqp->sq_drained);
  2171. if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
  2172. complete(&iwqp->rq_drained);
  2173. }
  2174. entry->byte_len = cq_poll_info.bytes_xfered;
  2175. entry++;
  2176. cqe_count++;
  2177. }
  2178. spin_unlock_irqrestore(&iwcq->lock, flags);
  2179. return cqe_count;
  2180. }
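/*
 * Usage sketch (illustrative; cq and the handle_*() helpers are
 * hypothetical): kernel consumers typically drain the CQ in a loop until
 * this routine reports it empty, e.g.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			handle_flush(&wc);
 *		else
 *			handle_completion(&wc);
 *	}
 */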
/**
 * i40iw_req_notify_cq - arm cq for a kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
  2186. static int i40iw_req_notify_cq(struct ib_cq *ibcq,
  2187. enum ib_cq_notify_flags notify_flags)
  2188. {
  2189. struct i40iw_cq *iwcq;
  2190. struct i40iw_cq_uk *ukcq;
  2191. unsigned long flags;
  2192. enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
  2193. iwcq = (struct i40iw_cq *)ibcq;
  2194. ukcq = &iwcq->sc_cq.cq_uk;
  2195. if (notify_flags == IB_CQ_SOLICITED)
  2196. cq_notify = IW_CQ_COMPL_SOLICITED;
  2197. spin_lock_irqsave(&iwcq->lock, flags);
  2198. ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
  2199. spin_unlock_irqrestore(&iwcq->lock, flags);
  2200. return 0;
  2201. }
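/*
 * Illustrative note (cq, wc and process() are hypothetical): the usual
 * consumer pattern is to re-arm and then poll once more, so completions
 * that arrived before the arm are not lost:
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		process(&wc);
 *
 * Passing IB_CQ_SOLICITED instead maps to IW_CQ_COMPL_SOLICITED above.
 */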
  2202. /**
  2203. * i40iw_port_immutable - return port's immutable data
  2204. * @ibdev: ib dev struct
  2205. * @port_num: port number
  2206. * @immutable: immutable data for the port return
  2207. */
  2208. static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
  2209. struct ib_port_immutable *immutable)
  2210. {
  2211. struct ib_port_attr attr;
  2212. int err;
  2213. immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
  2214. err = ib_query_port(ibdev, port_num, &attr);
  2215. if (err)
  2216. return err;
  2217. immutable->pkey_tbl_len = attr.pkey_tbl_len;
  2218. immutable->gid_tbl_len = attr.gid_tbl_len;
  2219. return 0;
  2220. }
  2221. static const char * const i40iw_hw_stat_names[] = {
/* 32bit names */
  2223. [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
  2224. [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
  2225. [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
  2226. [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
  2227. [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
  2228. [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
  2229. [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
  2230. [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
  2231. [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
/* 64bit names */
  2233. [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2234. "ip4InOctets",
  2235. [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2236. "ip4InPkts",
  2237. [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
  2238. "ip4InReasmRqd",
  2239. [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2240. "ip4InMcastPkts",
  2241. [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2242. "ip4OutOctets",
  2243. [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2244. "ip4OutPkts",
  2245. [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
  2246. "ip4OutSegRqd",
  2247. [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2248. "ip4OutMcastPkts",
  2249. [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2250. "ip6InOctets",
  2251. [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2252. "ip6InPkts",
  2253. [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
  2254. "ip6InReasmRqd",
  2255. [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2256. "ip6InMcastPkts",
  2257. [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2258. "ip6OutOctets",
  2259. [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2260. "ip6OutPkts",
  2261. [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
  2262. "ip6OutSegRqd",
  2263. [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
  2264. "ip6OutMcastPkts",
  2265. [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
  2266. "tcpInSegs",
  2267. [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
  2268. "tcpOutSegs",
  2269. [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
  2270. "iwInRdmaReads",
  2271. [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
  2272. "iwInRdmaSends",
  2273. [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
  2274. "iwInRdmaWrites",
  2275. [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
  2276. "iwOutRdmaReads",
  2277. [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
  2278. "iwOutRdmaSends",
  2279. [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
  2280. "iwOutRdmaWrites",
  2281. [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
  2282. "iwRdmaBnd",
  2283. [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
  2284. "iwRdmaInv"
  2285. };
  2286. static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
  2287. size_t str_len)
  2288. {
  2289. u32 firmware_version = I40IW_FW_VERSION;
  2290. snprintf(str, str_len, "%u.%u", firmware_version,
  2291. (firmware_version & 0x000000ff));
  2292. }
  2293. /**
  2294. * i40iw_alloc_hw_stats - Allocate a hw stats structure
  2295. * @ibdev: device pointer from stack
  2296. * @port_num: port number
  2297. */
  2298. static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
  2299. u8 port_num)
  2300. {
  2301. struct i40iw_device *iwdev = to_iwdev(ibdev);
  2302. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  2303. int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
  2304. I40IW_HW_STAT_INDEX_MAX_64;
  2305. unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
  2306. BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
  2307. (I40IW_HW_STAT_INDEX_MAX_32 +
  2308. I40IW_HW_STAT_INDEX_MAX_64));
  2309. /*
  2310. * PFs get the default update lifespan, but VFs only update once
  2311. * per second
  2312. */
  2313. if (!dev->is_pf)
  2314. lifespan = 1000;
  2315. return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
  2316. lifespan);
  2317. }
  2318. /**
  2319. * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
  2320. * @ibdev: device pointer from stack
  2321. * @stats: stats pointer from stack
  2322. * @port_num: port number
  2323. * @index: which hw counter the stack is requesting we update
  2324. */
  2325. static int i40iw_get_hw_stats(struct ib_device *ibdev,
  2326. struct rdma_hw_stats *stats,
  2327. u8 port_num, int index)
  2328. {
  2329. struct i40iw_device *iwdev = to_iwdev(ibdev);
  2330. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  2331. struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
  2332. struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
  2333. if (dev->is_pf) {
  2334. i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
  2335. } else {
  2336. if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
  2337. return -ENOSYS;
  2338. }
  2339. memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
  2340. return stats->num_counters;
  2341. }
  2342. /**
  2343. * i40iw_query_gid - Query port GID
  2344. * @ibdev: device pointer from stack
  2345. * @port: port number
  2346. * @index: Entry index
  2347. * @gid: Global ID
  2348. */
  2349. static int i40iw_query_gid(struct ib_device *ibdev,
  2350. u8 port,
  2351. int index,
  2352. union ib_gid *gid)
  2353. {
  2354. struct i40iw_device *iwdev = to_iwdev(ibdev);
  2355. memset(gid->raw, 0, sizeof(gid->raw));
  2356. ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
  2357. return 0;
  2358. }
/**
 * i40iw_modify_port - modify port properties
 * @ibdev: device pointer from stack
 * @port: port number
 * @port_modify_mask: mask for port modifications
 * @props: port properties
 */
  2366. static int i40iw_modify_port(struct ib_device *ibdev,
  2367. u8 port,
  2368. int port_modify_mask,
  2369. struct ib_port_modify *props)
  2370. {
  2371. return -ENOSYS;
  2372. }
  2373. /**
  2374. * i40iw_query_pkey - Query partition key
  2375. * @ibdev: device pointer from stack
  2376. * @port: port number
  2377. * @index: index of pkey
  2378. * @pkey: pointer to store the pkey
  2379. */
  2380. static int i40iw_query_pkey(struct ib_device *ibdev,
  2381. u8 port,
  2382. u16 index,
  2383. u16 *pkey)
  2384. {
  2385. *pkey = 0;
  2386. return 0;
  2387. }
/**
 * i40iw_create_ah - create address handle
 * @ibpd: ptr of pd
 * @attr: address handle attributes
 * @udata: user data
 */
  2393. static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
  2394. struct rdma_ah_attr *attr,
  2395. struct ib_udata *udata)
  2396. {
  2397. return ERR_PTR(-ENOSYS);
  2398. }
  2399. /**
  2400. * i40iw_destroy_ah - Destroy address handle
  2401. * @ah: pointer to address handle
  2402. */
  2403. static int i40iw_destroy_ah(struct ib_ah *ah)
  2404. {
  2405. return -ENOSYS;
  2406. }
  2407. /**
  2408. * i40iw_init_rdma_device - initialization of iwarp device
  2409. * @iwdev: iwarp device
  2410. */
  2411. static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
  2412. {
  2413. struct i40iw_ib_device *iwibdev;
  2414. struct net_device *netdev = iwdev->netdev;
  2415. struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
  2416. iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
  2417. if (!iwibdev) {
  2418. i40iw_pr_err("iwdev == NULL\n");
  2419. return NULL;
  2420. }
  2421. strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
  2422. iwibdev->ibdev.owner = THIS_MODULE;
  2423. iwdev->iwibdev = iwibdev;
  2424. iwibdev->iwdev = iwdev;
  2425. iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
  2426. ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
  2427. iwibdev->ibdev.uverbs_cmd_mask =
  2428. (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
  2429. (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
  2430. (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
  2431. (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
  2432. (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
  2433. (1ull << IB_USER_VERBS_CMD_REG_MR) |
  2434. (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
  2435. (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
  2436. (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
  2437. (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
  2438. (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
  2439. (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
  2440. (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
  2441. (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
  2442. (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
  2443. (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
  2444. (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
  2445. (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
  2446. (1ull << IB_USER_VERBS_CMD_POST_RECV) |
  2447. (1ull << IB_USER_VERBS_CMD_POST_SEND);
  2448. iwibdev->ibdev.phys_port_cnt = 1;
  2449. iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
  2450. iwibdev->ibdev.dev.parent = &pcidev->dev;
  2451. iwibdev->ibdev.query_port = i40iw_query_port;
  2452. iwibdev->ibdev.modify_port = i40iw_modify_port;
  2453. iwibdev->ibdev.query_pkey = i40iw_query_pkey;
  2454. iwibdev->ibdev.query_gid = i40iw_query_gid;
  2455. iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
  2456. iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
  2457. iwibdev->ibdev.mmap = i40iw_mmap;
  2458. iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
  2459. iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
  2460. iwibdev->ibdev.create_qp = i40iw_create_qp;
  2461. iwibdev->ibdev.modify_qp = i40iw_modify_qp;
  2462. iwibdev->ibdev.query_qp = i40iw_query_qp;
  2463. iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
  2464. iwibdev->ibdev.create_cq = i40iw_create_cq;
  2465. iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
  2466. iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
  2467. iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
  2468. iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
  2469. iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
  2470. iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
  2471. iwibdev->ibdev.query_device = i40iw_query_device;
  2472. iwibdev->ibdev.create_ah = i40iw_create_ah;
  2473. iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
  2474. iwibdev->ibdev.drain_sq = i40iw_drain_sq;
  2475. iwibdev->ibdev.drain_rq = i40iw_drain_rq;
  2476. iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
  2477. iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
  2478. iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
  2479. if (!iwibdev->ibdev.iwcm) {
  2480. ib_dealloc_device(&iwibdev->ibdev);
  2481. return NULL;
  2482. }
  2483. iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
  2484. iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
  2485. iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
  2486. iwibdev->ibdev.iwcm->connect = i40iw_connect;
  2487. iwibdev->ibdev.iwcm->accept = i40iw_accept;
  2488. iwibdev->ibdev.iwcm->reject = i40iw_reject;
  2489. iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
  2490. iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
  2491. memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
  2492. sizeof(iwibdev->ibdev.iwcm->ifname));
  2493. iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
  2494. iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
  2495. iwibdev->ibdev.poll_cq = i40iw_poll_cq;
  2496. iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
  2497. iwibdev->ibdev.post_send = i40iw_post_send;
  2498. iwibdev->ibdev.post_recv = i40iw_post_recv;
  2499. return iwibdev;
  2500. }
  2501. /**
  2502. * i40iw_port_ibevent - indicate port event
  2503. * @iwdev: iwarp device
  2504. */
  2505. void i40iw_port_ibevent(struct i40iw_device *iwdev)
  2506. {
  2507. struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
  2508. struct ib_event event;
  2509. event.device = &iwibdev->ibdev;
  2510. event.element.port_num = 1;
  2511. event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
  2512. ib_dispatch_event(&event);
  2513. }
  2514. /**
  2515. * i40iw_unregister_rdma_device - unregister of iwarp from IB
  2516. * @iwibdev: rdma device ptr
  2517. */
  2518. static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
  2519. {
  2520. int i;
  2521. for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
  2522. device_remove_file(&iwibdev->ibdev.dev,
  2523. i40iw_dev_attributes[i]);
  2524. ib_unregister_device(&iwibdev->ibdev);
  2525. }
  2526. /**
  2527. * i40iw_destroy_rdma_device - destroy rdma device and free resources
  2528. * @iwibdev: IB device ptr
  2529. */
  2530. void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
  2531. {
  2532. if (!iwibdev)
  2533. return;
  2534. i40iw_unregister_rdma_device(iwibdev);
  2535. kfree(iwibdev->ibdev.iwcm);
  2536. iwibdev->ibdev.iwcm = NULL;
  2537. wait_event_timeout(iwibdev->iwdev->close_wq,
  2538. !atomic64_read(&iwibdev->iwdev->use_count),
  2539. I40IW_EVENT_TIMEOUT);
  2540. ib_dealloc_device(&iwibdev->ibdev);
  2541. }
  2542. /**
  2543. * i40iw_register_rdma_device - register iwarp device to IB
  2544. * @iwdev: iwarp device
  2545. */
  2546. int i40iw_register_rdma_device(struct i40iw_device *iwdev)
  2547. {
  2548. int i, ret;
  2549. struct i40iw_ib_device *iwibdev;
  2550. iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
  2551. if (!iwdev->iwibdev)
  2552. return -ENOMEM;
  2553. iwibdev = iwdev->iwibdev;
  2554. ret = ib_register_device(&iwibdev->ibdev, NULL);
  2555. if (ret)
  2556. goto error;
  2557. for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
  2558. ret =
  2559. device_create_file(&iwibdev->ibdev.dev,
  2560. i40iw_dev_attributes[i]);
  2561. if (ret) {
  2562. while (i > 0) {
  2563. i--;
  2564. device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
  2565. }
  2566. ib_unregister_device(&iwibdev->ibdev);
  2567. goto error;
  2568. }
  2569. }
  2570. return 0;
  2571. error:
  2572. kfree(iwdev->iwibdev->ibdev.iwcm);
  2573. iwdev->iwibdev->ibdev.iwcm = NULL;
  2574. ib_dealloc_device(&iwdev->iwibdev->ibdev);
  2575. return ret;
  2576. }