i40iw_cm.c

/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
*   copyright notice, this list of conditions and the following
*   disclaimer.
*
* - Redistributions in binary form must reproduce the above
*   copyright notice, this list of conditions and the following
*   disclaimer in the documentation and/or other materials
*   provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/atomic.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/notifier.h>
#include <linux/net.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/highmem.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip_fib.h>
#include <net/tcp.h>
#include <asm/checksum.h>
#include "i40iw.h"

static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
static void i40iw_cm_post_event(struct i40iw_cm_event *event);
static void i40iw_disconnect_worker(struct work_struct *work);

/**
 * i40iw_free_sqbuf - put back puda buffer if refcount is 0
 * @vsi: pointer to vsi structure
 * @bufp: puda buffer to free
 */
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
	struct i40iw_puda_rsrc *ilq = vsi->ilq;

	if (!atomic_dec_return(&buf->refcount))
		i40iw_puda_ret_bufpool(ilq, buf);
}

/**
 * i40iw_derive_hw_ird_setting - Calculate IRD
 * @cm_ird: IRD of connection's node
 *
 * The ird from the connection is rounded to the next supported HW
 * setting (2, 8, 32, 64) and then encoded for the ird_size field of
 * qp_ctx.
 */
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
	u8 encoded_ird_size;
	u8 pof2_cm_ird = 1;

	/* round up to the next power of 2 */
	while (pof2_cm_ird < cm_ird)
		pof2_cm_ird *= 2;

	/* ird_size field is encoded in qp_ctx */
	switch (pof2_cm_ird) {
	case I40IW_HW_IRD_SETTING_64:
		encoded_ird_size = 3;
		break;
	case I40IW_HW_IRD_SETTING_32:
	case I40IW_HW_IRD_SETTING_16:
		encoded_ird_size = 2;
		break;
	case I40IW_HW_IRD_SETTING_8:
	case I40IW_HW_IRD_SETTING_4:
		encoded_ird_size = 1;
		break;
	case I40IW_HW_IRD_SETTING_2:
	default:
		encoded_ird_size = 0;
		break;
	}
	return encoded_ird_size;
}
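/*
 * Illustrative sketch (not part of the driver): i40iw_record_ird_ord()
 * below clamps connection IRD/ORD values to the supported maximums, so
 * a clamped IRD stays in range for the rounding loop above. A requested
 * IRD of 20, for example, rounds up to 32 and encodes as 2:
 *
 *	u8 enc = i40iw_derive_hw_ird_setting(20);	/* enc == 2 *\/
 */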
/**
 * i40iw_record_ird_ord - Record IRD/ORD passed in
 * @cm_node: connection's node
 * @conn_ird: connection IRD
 * @conn_ord: connection ORD
 */
static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
{
	if (conn_ird > I40IW_MAX_IRD_SIZE)
		conn_ird = I40IW_MAX_IRD_SIZE;

	if (conn_ord > I40IW_MAX_ORD_SIZE)
		conn_ord = I40IW_MAX_ORD_SIZE;

	cm_node->ird_size = conn_ird;
	cm_node->ord_size = conn_ord;
}

/**
 * i40iw_copy_ip_ntohl - copy IP address from network to host order
 * @dst: IP address in host order
 * @src: IP address in network order (big endian)
 */
void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
{
	*dst++ = ntohl(*src++);
	*dst++ = ntohl(*src++);
	*dst++ = ntohl(*src++);
	*dst = ntohl(*src);
}

/**
 * i40iw_copy_ip_htonl - copy IP address from host to network order
 * @dst: IP address in network order (big endian)
 * @src: IP address in host order
 */
static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
{
	*dst++ = htonl(*src++);
	*dst++ = htonl(*src++);
	*dst++ = htonl(*src++);
	*dst = htonl(*src);
}
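/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * copy the four 32-bit words of an IPv6 address one word at a time,
 * converting byte order as they go. For an IPv4 node only word 0 of
 * loc_addr/rem_addr is used. For example:
 *
 *	u32 host_ip[4];
 *	struct in6_addr a = IN6ADDR_LOOPBACK_INIT;	/* ::1 *\/
 *
 *	i40iw_copy_ip_ntohl(host_ip, a.in6_u.u6_addr32);
 *	/* host_ip[0..2] == 0, host_ip[3] == 1 *\/
 */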
/**
 * i40iw_fill_sockaddr4 - get addr info for passive connection
 * @cm_node: connection's node
 * @event: upper layer's cm event
 */
static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
					struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;

	laddr->sin_port = htons(cm_node->loc_port);
	raddr->sin_port = htons(cm_node->rem_port);

	laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
	raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
}

/**
 * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
 * @cm_node: connection's node
 * @event: upper layer's cm event
 */
static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
					struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;

	laddr6->sin6_port = htons(cm_node->loc_port);
	raddr6->sin6_port = htons(cm_node->rem_port);

	i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
			    cm_node->loc_addr);
	i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
			    cm_node->rem_addr);
}

/**
 * i40iw_get_addr_info
 * @cm_node: contains ip/tcp info
 * @cm_info: to get a copy of the cm_node ip/tcp info
 */
static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
				struct i40iw_cm_info *cm_info)
{
	cm_info->ipv4 = cm_node->ipv4;
	cm_info->vlan_id = cm_node->vlan_id;
	memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
	memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
	cm_info->loc_port = cm_node->loc_port;
	cm_info->rem_port = cm_node->rem_port;
	cm_info->user_pri = cm_node->user_pri;
}

/**
 * i40iw_get_cmevent_info - for cm event upcall
 * @cm_node: connection's node
 * @cm_id: upper layer's cm struct for the event
 * @event: upper layer's cm event
 */
static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
					  struct iw_cm_id *cm_id,
					  struct iw_cm_event *event)
{
	memcpy(&event->local_addr, &cm_id->m_local_addr,
	       sizeof(event->local_addr));
	memcpy(&event->remote_addr, &cm_id->m_remote_addr,
	       sizeof(event->remote_addr));
	if (cm_node) {
		event->private_data = (void *)cm_node->pdata_buf;
		event->private_data_len = (u8)cm_node->pdata.size;
		event->ird = cm_node->ird_size;
		event->ord = cm_node->ord_size;
	}
}

/**
 * i40iw_send_cm_event - upcall cm's event handler
 * @cm_node: connection's node
 * @cm_id: upper layer's cm info struct
 * @type: Event type to indicate
 * @status: status for the event type
 */
static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
			       struct iw_cm_id *cm_id,
			       enum iw_cm_event_type type,
			       int status)
{
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = type;
	event.status = status;
	switch (type) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		if (cm_node->ipv4)
			i40iw_fill_sockaddr4(cm_node, &event);
		else
			i40iw_fill_sockaddr6(cm_node, &event);
		event.provider_data = (void *)cm_node;
		event.private_data = (void *)cm_node->pdata_buf;
		event.private_data_len = (u8)cm_node->pdata.size;
		event.ird = cm_node->ird_size;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		i40iw_get_cmevent_info(cm_node, cm_id, &event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.ird = cm_node->ird_size;
		event.ord = cm_node->ord_size;
		break;
	case IW_CM_EVENT_DISCONNECT:
		break;
	case IW_CM_EVENT_CLOSE:
		break;
	default:
		i40iw_pr_err("event type received type = %d\n", type);
		return -1;
	}
	return cm_id->event_handler(cm_id, &event);
}

/**
 * i40iw_create_event - create cm event
 * @cm_node: connection's node
 * @type: Event type to generate
 */
static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
						 enum i40iw_cm_event_type type)
{
	struct i40iw_cm_event *event;

	if (!cm_node->cm_id)
		return NULL;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return NULL;

	event->type = type;
	event->cm_node = cm_node;
	memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
	memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
	event->cm_info.rem_port = cm_node->rem_port;
	event->cm_info.loc_port = cm_node->loc_port;
	event->cm_info.cm_id = cm_node->cm_id;

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
		    cm_node,
		    event,
		    type,
		    event->cm_info.loc_addr,
		    event->cm_info.rem_addr);

	i40iw_cm_post_event(event);
	return event;
}

/**
 * i40iw_free_retrans_entry - free send entry
 * @cm_node: connection's node
 */
static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
{
	struct i40iw_device *iwdev = cm_node->iwdev;
	struct i40iw_timer_entry *send_entry;

	send_entry = cm_node->send_entry;
	if (send_entry) {
		cm_node->send_entry = NULL;
		i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
		kfree(send_entry);
		atomic_dec(&cm_node->ref_count);
	}
}

/**
 * i40iw_cleanup_retrans_entry - free send entry with lock
 * @cm_node: connection's node
 */
static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	i40iw_free_retrans_entry(cm_node);
	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
/**
 * i40iw_form_cm_frame - get a free packet and build frame
 * @cm_node: connection's node info to use in frame
 * @options: pointer to options info
 * @hdr: pointer to mpa header
 * @pdata: pointer to private data
 * @flags: indicates FIN or ACK
 */
static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
						  struct i40iw_kmem_info *options,
						  struct i40iw_kmem_info *hdr,
						  struct i40iw_kmem_info *pdata,
						  u8 flags)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	u8 *buf;
	struct tcphdr *tcph;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	u16 packetsize;
	u16 eth_hlen = ETH_HLEN;
	u32 opts_len = 0;
	u32 pd_len = 0;
	u32 hdr_len = 0;
	u16 vtag;

	sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
	if (!sqbuf)
		return NULL;
	buf = sqbuf->mem.va;

	if (options)
		opts_len = (u32)options->size;

	if (hdr)
		hdr_len = hdr->size;

	if (pdata)
		pd_len = pdata->size;

	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
		eth_hlen += 4;

	if (cm_node->ipv4)
		packetsize = sizeof(*iph) + sizeof(*tcph);
	else
		packetsize = sizeof(*ip6h) + sizeof(*tcph);
	packetsize += opts_len + hdr_len + pd_len;

	memset(buf, 0x00, eth_hlen + packetsize);

	sqbuf->totallen = packetsize + eth_hlen;
	sqbuf->maclen = eth_hlen;
	sqbuf->tcphlen = sizeof(*tcph) + opts_len;
	sqbuf->scratch = (void *)cm_node;

	ethh = (struct ethhdr *)buf;
	buf += eth_hlen;

	if (cm_node->ipv4) {
		sqbuf->ipv4 = true;

		iph = (struct iphdr *)buf;
		buf += sizeof(*iph);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
		} else {
			ethh->h_proto = htons(ETH_P_IP);
		}

		iph->version = IPVERSION;
		iph->ihl = 5;	/* 5 * 4-byte words, IP header len */
		iph->tos = cm_node->tos;
		iph->tot_len = htons(packetsize);
		iph->id = htons(++cm_node->tcp_cntxt.loc_id);

		iph->frag_off = htons(0x4000);
		iph->ttl = 0x40;
		iph->protocol = IPPROTO_TCP;
		iph->saddr = htonl(cm_node->loc_addr[0]);
		iph->daddr = htonl(cm_node->rem_addr[0]);
	} else {
		sqbuf->ipv4 = false;
		ip6h = (struct ipv6hdr *)buf;
		buf += sizeof(*ip6h);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
		} else {
			ethh->h_proto = htons(ETH_P_IPV6);
		}
		ip6h->version = 6;
		ip6h->priority = cm_node->tos >> 4;
		ip6h->flow_lbl[0] = cm_node->tos << 4;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;
		ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
		ip6h->nexthdr = 6;
		ip6h->hop_limit = 128;
		i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
				    cm_node->loc_addr);
		i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
				    cm_node->rem_addr);
	}

	tcph->source = htons(cm_node->loc_port);
	tcph->dest = htons(cm_node->rem_port);

	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);

	if (flags & SET_ACK) {
		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
		tcph->ack = 1;
	} else {
		tcph->ack_seq = 0;
	}

	if (flags & SET_SYN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->syn = 1;
	} else {
		cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
	}

	if (flags & SET_FIN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->fin = 1;
	}

	if (flags & SET_RST)
		tcph->rst = 1;

	tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
	sqbuf->tcphlen = tcph->doff << 2;
	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
	tcph->urg_ptr = 0;

	if (opts_len) {
		memcpy(buf, options->addr, opts_len);
		buf += opts_len;
	}

	if (hdr_len) {
		memcpy(buf, hdr->addr, hdr_len);
		buf += hdr_len;
	}

	if (pdata && pdata->addr)
		memcpy(buf, pdata->addr, pdata->size);

	atomic_set(&sqbuf->refcount, 1);

	return sqbuf;
}
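/*
 * Illustrative sketch (not part of the driver): doff counts 32-bit
 * words in the TCP header, so a 20-byte base header plus 4 option
 * bytes gives
 *
 *	doff    = (20 + 4 + 3) >> 2 = 6
 *	tcphlen = 6 << 2 = 24 bytes
 *
 * and the "+ 3" rounds any options length that is not a multiple of 4
 * up to the next word boundary.
 */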
/**
 * i40iw_send_reset - Send RST packet
 * @cm_node: connection's node
 */
static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	int flags = SET_RST | SET_ACK;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}

	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
}

/**
 * i40iw_active_open_err - send event for active side cm error
 * @cm_node: connection's node
 * @reset: Flag to send reset or not
 */
static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_connect_errs++;
	if (reset) {
		i40iw_debug(cm_node->dev,
			    I40IW_DEBUG_CM,
			    "%s cm_node=%p state=%d\n",
			    __func__,
			    cm_node,
			    cm_node->state);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
	}

	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
}

/**
 * i40iw_passive_open_err - handle passive side cm error
 * @cm_node: connection's node
 * @reset: send reset or just free cm_node
 */
static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_passive_errs++;
	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "%s cm_node=%p state=%d\n",
		    __func__,
		    cm_node,
		    cm_node->state);
	if (reset)
		i40iw_send_reset(cm_node);
	else
		i40iw_rem_ref_cm_node(cm_node);
}

/**
 * i40iw_event_connect_error - to create connect error event
 * @event: cm information for connect event
 */
static void i40iw_event_connect_error(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct iw_cm_id *cm_id;

	cm_id = event->cm_node->cm_id;
	if (!cm_id)
		return;

	iwqp = cm_id->provider_data;

	if (!iwqp || !iwqp->iwdev)
		return;

	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node, cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    -ECONNRESET);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}

/**
 * i40iw_process_options
 * @cm_node: connection's node
 * @optionsloc: point to start of options
 * @optionsize: size of all options
 * @syn_packet: flag if syn packet
 */
static int i40iw_process_options(struct i40iw_cm_node *cm_node,
				 u8 *optionsloc,
				 u32 optionsize,
				 u32 syn_packet)
{
	u32 tmp;
	u32 offset = 0;
	union all_known_options *all_options;
	char got_mss_option = 0;

	while (offset < optionsize) {
		all_options = (union all_known_options *)(optionsloc + offset);
		switch (all_options->as_base.optionnum) {
		case OPTION_NUMBER_END:
			offset = optionsize;
			break;
		case OPTION_NUMBER_NONE:
			offset += 1;
			continue;
		case OPTION_NUMBER_MSS:
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: MSS Length: %d Offset: %d Size: %d\n",
				    __func__,
				    all_options->as_mss.length,
				    offset,
				    optionsize);
			got_mss_option = 1;
			if (all_options->as_mss.length != 4)
				return -1;
			tmp = ntohs(all_options->as_mss.mss);
			if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
				cm_node->tcp_cntxt.mss = tmp;
			break;
		case OPTION_NUMBER_WINDOW_SCALE:
			cm_node->tcp_cntxt.snd_wscale =
			    all_options->as_windowscale.shiftcount;
			break;
		default:
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "TCP Option not understood: %x\n",
				    all_options->as_base.optionnum);
			break;
		}
		offset += all_options->as_base.length;
	}
	if (!got_mss_option && syn_packet)
		cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
	return 0;
}
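/*
 * Illustrative sketch (not part of the driver): a SYN carrying MSS and
 * window-scale options would be walked above as the byte sequence
 *
 *	02 04 05 b4   03 03 02    00
 *	(MSS = 1460)  (wscale = 2) (end)
 *
 * lowering tcp_cntxt.mss to 1460 if it was larger and setting
 * tcp_cntxt.snd_wscale to 2.
 */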
/**
 * i40iw_handle_tcp_options - process TCP options on a received segment
 * @cm_node: connection's node
 * @tcph: pointer to tcp header
 * @optionsize: size of options rcvd
 * @passive: active or passive flag
 */
static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
				    struct tcphdr *tcph,
				    int optionsize,
				    int passive)
{
	u8 *optionsloc = (u8 *)&tcph[1];

	if (optionsize) {
		if (i40iw_process_options(cm_node,
					  optionsloc,
					  optionsize,
					  (u32)tcph->syn)) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: Node %p, Sending RESET\n",
				    __func__,
				    cm_node);
			if (passive)
				i40iw_passive_open_err(cm_node, true);
			else
				i40iw_active_open_err(cm_node, true);
			return -1;
		}
	}

	cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
	    cm_node->tcp_cntxt.snd_wscale;

	if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
		cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
	return 0;
}

/**
 * i40iw_build_mpa_v1 - build a MPA V1 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: to do read0 or write0
 */
static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
		break;
	case MPA_KEY_REPLY:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
		break;
	default:
		break;
	}
	mpa_frame->flags = IETF_MPA_FLAGS_CRC;
	mpa_frame->rev = cm_node->mpa_frame_rev;
	mpa_frame->priv_data_len = htons(cm_node->pdata.size);
}

/**
 * i40iw_build_mpa_v2 - build a MPA V2 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: to do read0 or write0
 */
static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
	u16 ctrl_ird, ctrl_ord;

	/* initialize the upper 5 bytes of the frame */
	i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
	mpa_frame->flags |= IETF_MPA_V2_FLAG;
	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);

	/* initialize RTR msg */
	if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
		ctrl_ird = IETF_NO_IRD_ORD;
		ctrl_ord = IETF_NO_IRD_ORD;
	} else {
		ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ird_size;
		ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ord_size;
	}

	ctrl_ird |= IETF_PEER_TO_PEER;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		ctrl_ord |= IETF_RDMA0_WRITE;
		ctrl_ord |= IETF_RDMA0_READ;
		break;
	case MPA_KEY_REPLY:
		switch (cm_node->send_rdma0_op) {
		case SEND_RDMA_WRITE_ZERO:
			ctrl_ord |= IETF_RDMA0_WRITE;
			break;
		case SEND_RDMA_READ_ZERO:
			ctrl_ord |= IETF_RDMA0_READ;
			break;
		}
		break;
	default:
		break;
	}
	rtr_msg->ctrl_ird = htons(ctrl_ird);
	rtr_msg->ctrl_ord = htons(ctrl_ord);
}
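/*
 * Illustrative sketch (not part of the driver), assuming the MPA v2
 * enhanced-RTR bit layout used here (the IETF_NO_IRD_ORD mask selects
 * the low value bits, the remaining high bits carry control flags):
 * with ird_size = 2 and ord_size = 2 on a request, the fields above
 * come out as
 *
 *	rtr_msg->ctrl_ird = htons(2 | IETF_PEER_TO_PEER);
 *	rtr_msg->ctrl_ord = htons(2 | IETF_RDMA0_WRITE | IETF_RDMA0_READ);
 */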
/**
 * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
 * @cm_node: connection's node
 * @mpa: mpa data buffer
 * @mpa_key: to do read0 or write0
 */
static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
				    struct i40iw_kmem_info *mpa,
				    u8 mpa_key)
{
	int hdr_len = 0;

	switch (cm_node->mpa_frame_rev) {
	case IETF_MPA_V1:
		hdr_len = sizeof(struct ietf_mpa_v1);
		i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
		break;
	case IETF_MPA_V2:
		hdr_len = sizeof(struct ietf_mpa_v2);
		i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
		break;
	default:
		break;
	}

	return hdr_len;
}

/**
 * i40iw_send_mpa_request - active node send mpa request to passive node
 * @cm_node: connection's node
 */
static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	if (!cm_node) {
		i40iw_pr_err("cm_node == NULL\n");
		return -1;
	}

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REQUEST);
	if (!cm_node->mpa_hdr.size) {
		i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
		return -1;
	}

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &cm_node->pdata,
				    SET_ACK);
	if (!sqbuf) {
		i40iw_pr_err("sq_buf == NULL\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_send_mpa_reject -
 * @cm_node: connection's node
 * @pdata: reject data for connection
 * @plen: length of reject data
 */
static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
				 const void *pdata,
				 u8 plen)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_kmem_info priv_info;

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REPLY);

	cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
	priv_info.addr = (void *)pdata;
	priv_info.size = plen;

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &priv_info,
				    SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -ENOMEM;
	}
	cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_parse_mpa - process an IETF MPA frame
 * @cm_node: connection's node
 * @buffer: Data pointer
 * @type: to return accept or reject
 * @len: Len of mpa buffer
 */
static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
{
	struct ietf_mpa_v1 *mpa_frame;
	struct ietf_mpa_v2 *mpa_v2_frame;
	struct ietf_rtr_msg *rtr_msg;
	int mpa_hdr_len;
	int priv_data_len;

	*type = I40IW_MPA_REQUEST_ACCEPT;

	if (len < sizeof(struct ietf_mpa_v1)) {
		i40iw_pr_err("ietf buffer small (%x)\n", len);
		return -1;
	}

	mpa_frame = (struct ietf_mpa_v1 *)buffer;
	mpa_hdr_len = sizeof(struct ietf_mpa_v1);
	priv_data_len = ntohs(mpa_frame->priv_data_len);

	if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
		i40iw_pr_err("large pri_data %d\n", priv_data_len);
		return -1;
	}
	if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
		i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
		return -1;
	}
	if (mpa_frame->rev > cm_node->mpa_frame_rev) {
		i40iw_pr_err("rev %d\n", mpa_frame->rev);
		return -1;
	}
	cm_node->mpa_frame_rev = mpa_frame->rev;

	if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
			i40iw_pr_err("Unexpected MPA Key received\n");
			return -1;
		}
	} else {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
			i40iw_pr_err("Unexpected MPA Key received\n");
			return -1;
		}
	}

	if (priv_data_len + mpa_hdr_len > len) {
		i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
			     priv_data_len, mpa_hdr_len, len);
		return -1;
	}
	if (len > MAX_CM_BUFFER) {
		i40iw_pr_err("ietf buffer large len = %d\n", len);
		return -1;
	}

	switch (mpa_frame->rev) {
	case IETF_MPA_V2:{
			u16 ird_size;
			u16 ord_size;
			u16 ctrl_ord;
			u16 ctrl_ird;

			mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
			mpa_hdr_len += IETF_RTR_MSG_SIZE;
			rtr_msg = &mpa_v2_frame->rtr_msg;

			/* parse rtr message */
			ctrl_ord = ntohs(rtr_msg->ctrl_ord);
			ctrl_ird = ntohs(rtr_msg->ctrl_ird);
			ird_size = ctrl_ird & IETF_NO_IRD_ORD;
			ord_size = ctrl_ord & IETF_NO_IRD_ORD;

			if (!(ctrl_ird & IETF_PEER_TO_PEER))
				return -1;

			if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
				cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
				goto negotiate_done;
			}

			if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
				/* responder */
				if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
					cm_node->ird_size = 1;
				if (cm_node->ord_size > ird_size)
					cm_node->ord_size = ird_size;
			} else {
				/* initiator */
				if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
					return -1;
				if (cm_node->ord_size > ird_size)
					cm_node->ord_size = ird_size;

				if (cm_node->ird_size < ord_size)
					/* no resources available */
					return -1;
			}

negotiate_done:
			if (ctrl_ord & IETF_RDMA0_READ)
				cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
			else if (ctrl_ord & IETF_RDMA0_WRITE)
				cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
			else	/* Not supported RDMA0 operation */
				return -1;
			i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
				    "MPAV2: Negotiated ORD: %d, IRD: %d\n",
				    cm_node->ord_size, cm_node->ird_size);
			break;
		}
	case IETF_MPA_V1:
	default:
		break;
	}

	memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
	cm_node->pdata.size = priv_data_len;
	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
		*type = I40IW_MPA_REQUEST_REJECT;

	if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
		cm_node->snd_mark_en = true;

	return 0;
}
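/*
 * Illustrative sketch (not part of the driver): on the responder side,
 * an incoming MPA v2 request whose RTR message decodes to ird_size = 2,
 * ord_size = 2, peer-to-peer set, and an RDMA0 read opener takes the
 * responder branch above, trims ord_size down to the peer's IRD if it
 * was larger, and selects send_rdma0_op = SEND_RDMA_READ_ZERO.
 */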
/**
 * i40iw_schedule_cm_timer
 * @cm_node: connection's node
 * @sqbuf: buffer to send
 * @type: if it is send or close
 * @send_retrans: if rexmits to be done
 * @close_when_complete: is cm_node to be removed
 *
 * note - cm_node needs to be protected before calling this: take a
 * reference with atomic_inc(&cm_node->ref_count) before the call and
 * drop it with i40iw_rem_ref_cm_node() when done.
 */
int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
			    struct i40iw_puda_buf *sqbuf,
			    enum i40iw_timer_type type,
			    int send_retrans,
			    int close_when_complete)
{
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_timer_entry *new_send;
	int ret = 0;
	u32 was_timer_set;
	unsigned long flags;

	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
	if (!new_send) {
		i40iw_free_sqbuf(vsi, (void *)sqbuf);
		return -ENOMEM;
	}
	new_send->retrycount = I40IW_DEFAULT_RETRYS;
	new_send->retranscount = I40IW_DEFAULT_RETRANS;
	new_send->sqbuf = sqbuf;
	new_send->timetosend = jiffies;
	new_send->type = type;
	new_send->send_retrans = send_retrans;
	new_send->close_when_complete = close_when_complete;

	if (type == I40IW_TIMER_TYPE_CLOSE) {
		new_send->timetosend += (HZ / 10);
		if (cm_node->close_entry) {
			kfree(new_send);
			i40iw_free_sqbuf(vsi, (void *)sqbuf);
			i40iw_pr_err("already close entry\n");
			return -EINVAL;
		}
		cm_node->close_entry = new_send;
	}

	if (type == I40IW_TIMER_TYPE_SEND) {
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		cm_node->send_entry = new_send;
		atomic_inc(&cm_node->ref_count);
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;

		atomic_inc(&sqbuf->refcount);
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
		if (!send_retrans) {
			i40iw_cleanup_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
			return ret;
		}
	}

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	was_timer_set = timer_pending(&cm_core->tcp_timer);

	if (!was_timer_set) {
		cm_core->tcp_timer.expires = new_send->timetosend;
		add_timer(&cm_core->tcp_timer);
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	return ret;
}
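/*
 * Illustrative sketch (not part of the driver), following the note
 * above: a caller protects the cm_node around the scheduling call.
 *
 *	atomic_inc(&cm_node->ref_count);
 *	if (i40iw_schedule_cm_timer(cm_node, sqbuf,
 *				    I40IW_TIMER_TYPE_SEND, 1, 0))
 *		i40iw_pr_err("schedule cm timer failed\n");
 *	...
 *	i40iw_rem_ref_cm_node(cm_node);
 */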
  992. /**
  993. * i40iw_retrans_expired - Could not rexmit the packet
  994. * @cm_node: connection's node
  995. */
  996. static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
  997. {
  998. struct iw_cm_id *cm_id = cm_node->cm_id;
  999. enum i40iw_cm_node_state state = cm_node->state;
  1000. cm_node->state = I40IW_CM_STATE_CLOSED;
  1001. switch (state) {
  1002. case I40IW_CM_STATE_SYN_RCVD:
  1003. case I40IW_CM_STATE_CLOSING:
  1004. i40iw_rem_ref_cm_node(cm_node);
  1005. break;
  1006. case I40IW_CM_STATE_FIN_WAIT1:
  1007. case I40IW_CM_STATE_LAST_ACK:
  1008. if (cm_node->cm_id)
  1009. cm_id->rem_ref(cm_id);
  1010. i40iw_send_reset(cm_node);
  1011. break;
  1012. default:
  1013. atomic_inc(&cm_node->ref_count);
  1014. i40iw_send_reset(cm_node);
  1015. i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
  1016. break;
  1017. }
  1018. }
  1019. /**
  1020. * i40iw_handle_close_entry - for handling retry/timeouts
  1021. * @cm_node: connection's node
  1022. * @rem_node: flag for remove cm_node
  1023. */
  1024. static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
  1025. {
  1026. struct i40iw_timer_entry *close_entry = cm_node->close_entry;
  1027. struct iw_cm_id *cm_id = cm_node->cm_id;
  1028. struct i40iw_qp *iwqp;
  1029. unsigned long flags;
  1030. if (!close_entry)
  1031. return;
  1032. iwqp = (struct i40iw_qp *)close_entry->sqbuf;
  1033. if (iwqp) {
  1034. spin_lock_irqsave(&iwqp->lock, flags);
  1035. if (iwqp->cm_id) {
  1036. iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
  1037. iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
  1038. iwqp->last_aeq = I40IW_AE_RESET_SENT;
  1039. iwqp->ibqp_state = IB_QPS_ERR;
  1040. spin_unlock_irqrestore(&iwqp->lock, flags);
  1041. i40iw_cm_disconn(iwqp);
  1042. } else {
  1043. spin_unlock_irqrestore(&iwqp->lock, flags);
  1044. }
  1045. } else if (rem_node) {
  1046. /* TIME_WAIT state */
  1047. i40iw_rem_ref_cm_node(cm_node);
  1048. }
  1049. if (cm_id)
  1050. cm_id->rem_ref(cm_id);
  1051. kfree(close_entry);
  1052. cm_node->close_entry = NULL;
  1053. }
  1054. /**
  1055. * i40iw_cm_timer_tick - system's timer expired callback
  1056. * @pass: Pointing to cm_core
  1057. */
  1058. static void i40iw_cm_timer_tick(unsigned long pass)
  1059. {
  1060. unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
  1061. struct i40iw_cm_node *cm_node;
  1062. struct i40iw_timer_entry *send_entry, *close_entry;
  1063. struct list_head *list_core_temp;
  1064. struct i40iw_sc_vsi *vsi;
  1065. struct list_head *list_node;
  1066. struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
	u32 settimer = 0;
	unsigned long timetosend;
	unsigned long flags;
	struct list_head timer_list;

	INIT_LIST_HEAD(&timer_list);

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		if (cm_node->close_entry || cm_node->send_entry) {
			atomic_inc(&cm_node->ref_count);
			list_add(&cm_node->timer_entry, &timer_list);
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &timer_list) {
		cm_node = container_of(list_node,
				       struct i40iw_cm_node,
				       timer_entry);
		close_entry = cm_node->close_entry;
		if (close_entry) {
			if (time_after(close_entry->timetosend, jiffies)) {
				if (nexttimeout > close_entry->timetosend ||
				    !settimer) {
					nexttimeout = close_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_handle_close_entry(cm_node, 1);
			}
		}

		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);

		send_entry = cm_node->send_entry;
		if (!send_entry)
			goto done;
		if (time_after(send_entry->timetosend, jiffies)) {
			if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
				if ((nexttimeout > send_entry->timetosend) ||
				    !settimer) {
					nexttimeout = send_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_free_retrans_entry(cm_node);
			}
			goto done;
		}

		if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
		    (cm_node->state == I40IW_CM_STATE_CLOSED)) {
			i40iw_free_retrans_entry(cm_node);
			goto done;
		}

		if (!send_entry->retranscount || !send_entry->retrycount) {
			i40iw_free_retrans_entry(cm_node);
			spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
			i40iw_retrans_expired(cm_node);
			cm_node->state = I40IW_CM_STATE_CLOSED;
			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
			goto done;
		}
		cm_node->cm_core->stats_pkt_retrans++;
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);

		vsi = &cm_node->iwdev->vsi;
		atomic_inc(&send_entry->sqbuf->refcount);
		i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		if (send_entry->send_retrans) {
			send_entry->retranscount--;
			timetosend = (I40IW_RETRY_TIMEOUT <<
				      (I40IW_DEFAULT_RETRANS -
				       send_entry->retranscount));

			send_entry->timetosend = jiffies +
			    min(timetosend, I40IW_MAX_TIMEOUT);
			if (nexttimeout > send_entry->timetosend || !settimer) {
				nexttimeout = send_entry->timetosend;
				settimer = 1;
			}
		} else {
			int close_when_complete;

			close_when_complete = send_entry->close_when_complete;
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p state=%d\n",
				    cm_node,
				    cm_node->state);
			i40iw_free_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
		}
done:
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		i40iw_rem_ref_cm_node(cm_node);
	}

	if (settimer) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		if (!timer_pending(&cm_core->tcp_timer)) {
			cm_core->tcp_timer.expires = nexttimeout;
			add_timer(&cm_core->tcp_timer);
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}
}

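/*
 * Example (illustrative sketch only, not driver code): the retransmit
 * timeout above backs off exponentially.  retranscount counts down from
 * I40IW_DEFAULT_RETRANS, so the shift grows with each consumed attempt,
 * and the result is clamped to I40IW_MAX_TIMEOUT (the exact base and cap
 * values depend on the driver's #defines):
 *
 *	unsigned long backoff(u32 retranscount)
 *	{
 *		unsigned long t = I40IW_RETRY_TIMEOUT <<
 *				  (I40IW_DEFAULT_RETRANS - retranscount);
 *
 *		return min(t, I40IW_MAX_TIMEOUT);
 *	}
 */
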
/**
 * i40iw_send_syn - send SYN packet
 * @cm_node: connection's node
 * @sendack: flag to set ACK bit or not
 */
int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
{
	struct i40iw_puda_buf *sqbuf;
	int flags = SET_SYN;
	char optionsbuffer[sizeof(struct option_mss) +
			   sizeof(struct option_windowscale) +
			   sizeof(struct option_base) + TCP_OPTIONS_PADDING];
	struct i40iw_kmem_info opts;
	int optionssize = 0;
	/* Sending MSS option */
	union all_known_options *options;

	opts.addr = optionsbuffer;
	if (!cm_node) {
		i40iw_pr_err("no cm_node\n");
		return -EINVAL;
	}

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_mss.optionnum = OPTION_NUMBER_MSS;
	options->as_mss.length = sizeof(struct option_mss);
	options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
	optionssize += sizeof(struct option_mss);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
	options->as_windowscale.length = sizeof(struct option_windowscale);
	options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
	optionssize += sizeof(struct option_windowscale);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_end = OPTION_NUMBER_END;
	optionssize += 1;

	if (sendack)
		flags |= SET_ACK;

	opts.size = optionssize;

	sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

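/*
 * Example (illustrative only, assuming OPTION_NUMBER_MSS/WINDOW_SCALE/END
 * follow the standard TCP option kinds 2, 3 and 0, and the packed layouts
 * implied by the length fields above): for an MSS of 1460 and a window
 * scale shift of 8, the option buffer would contain, byte for byte:
 *
 *	02 04 05 b4	MSS option: kind 2, length 4, value 0x05b4 (1460)
 *	03 03 08	Window-scale option: kind 3, length 3, shift 8
 *	00		End-of-options
 *
 * i.e. 8 bytes of options, already a multiple of 4, carried on the SYN
 * built by i40iw_form_cm_frame().
 */
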
/**
 * i40iw_send_ack - Send ACK packet
 * @cm_node: connection's node
 */
static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
	if (sqbuf)
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
	else
		i40iw_pr_err("no sqbuf\n");
}

/**
 * i40iw_send_fin - Send FIN pkt
 * @cm_node: connection's node
 */
static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_find_node - find a cm node that matches the reference cm node
 * @cm_core: cm's core
 * @rem_port: remote tcp port num
 * @rem_addr: remote ip addr
 * @loc_port: local tcp port num
 * @loc_addr: local ip addr
 * @add_refcnt: flag to increment refcount of cm_node
 */
struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
				      u16 rem_port,
				      u32 *rem_addr,
				      u16 loc_port,
				      u32 *loc_addr,
				      bool add_refcnt)
{
	struct list_head *hte;
	struct i40iw_cm_node *cm_node;
	unsigned long flags;

	hte = &cm_core->connected_nodes;

	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_entry(cm_node, hte, list) {
		if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
		    (cm_node->loc_port == loc_port) &&
		    !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
		    (cm_node->rem_port == rem_port)) {
			if (add_refcnt)
				atomic_inc(&cm_node->ref_count);
			spin_unlock_irqrestore(&cm_core->ht_lock, flags);
			return cm_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	/* no owner node */
	return NULL;
}

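/*
 * Example (illustrative sketch): a typical caller looks the node up with a
 * reference held, uses it, and then drops the reference:
 *
 *	cm_node = i40iw_find_node(cm_core, rem_port, rem_addr,
 *				  loc_port, loc_addr, true);
 *	if (!cm_node)
 *		return;		// no connection matches this quad
 *	// ... use cm_node ...
 *	i40iw_rem_ref_cm_node(cm_node);
 *
 * Passing add_refcnt = false is only safe when the caller already
 * serializes against node destruction by other means.
 */
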
/**
 * i40iw_find_listener - find a cm node listening on this addr-port pair
 * @cm_core: cm's core
 * @dst_addr: listener ip addr
 * @dst_port: listener tcp port num
 * @vlan_id: vlan id for the listen address
 * @listener_state: state to match with listen node's
 */
static struct i40iw_cm_listener *i40iw_find_listener(
					struct i40iw_cm_core *cm_core,
					u32 *dst_addr,
					u16 dst_port,
					u16 vlan_id,
					enum i40iw_cm_listener_state
					listener_state)
{
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	u32 listen_addr[4];
	u16 listen_port;
	unsigned long flags;

	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
		listen_port = listen_node->loc_port;

		/* compare node pair, return node handle if a match */
		if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
		     !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
		    (listen_port == dst_port) &&
		    (listener_state & listen_node->listener_state)) {
			atomic_inc(&listen_node->ref_count);
			spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
			return listen_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	return NULL;
}

/**
 * i40iw_add_hte_node - add a cm node to the hash table
 * @cm_core: cm's core
 * @cm_node: connection's node
 */
static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_node *cm_node)
{
	struct list_head *hte;
	unsigned long flags;

	if (!cm_node || !cm_core) {
		i40iw_pr_err("cm_node or cm_core == NULL\n");
		return;
	}
	spin_lock_irqsave(&cm_core->ht_lock, flags);

	/* get a handle on the hash table element (list head for this slot) */
	hte = &cm_core->connected_nodes;
	list_add_tail(&cm_node->list, hte);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
}

/**
 * i40iw_listen_port_in_use - determine if port is in use
 * @cm_core: cm's core
 * @port: Listen port number
 */
static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
{
	struct i40iw_cm_listener *listen_node;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		if (listen_node->loc_port == port) {
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	return ret;
}

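/*
 * Example (illustrative only): this check keeps the shared APBVT port
 * entry alive while any listener still uses the port.  A teardown path
 * typically does:
 *
 *	if (apbvt_del && !i40iw_listen_port_in_use(cm_core, loc_port))
 *		i40iw_manage_apbvt(iwdev, loc_port, I40IW_MANAGE_APBVT_DEL);
 *
 * i.e. the hardware entry is removed only once the last user of the port
 * is gone; this is the pattern used in i40iw_dec_refcnt_listen() below.
 */
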
/**
 * i40iw_del_multiple_qhash - Remove qhash and child listens
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 */
static enum i40iw_status_code i40iw_del_multiple_qhash(
				struct i40iw_device *iwdev,
				struct i40iw_cm_info *cm_info,
				struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = I40IW_ERR_CONFIG;
	struct list_head *pos, *tpos;
	unsigned long flags;

	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
	list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
		child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
		if (child_listen_node->ipv4)
			i40iw_debug(&iwdev->sc_dev,
				    I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		else
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		list_del(pos);
		memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
		       sizeof(cm_info->loc_addr));
		cm_info->vlan_id = child_listen_node->vlan_id;
		if (child_listen_node->qhash_set) {
			ret = i40iw_manage_qhash(iwdev, cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_DELETE,
						 NULL, false);
			child_listen_node->qhash_set = false;
		} else {
			ret = I40IW_SUCCESS;
		}
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "freed pointer = %p\n",
			    child_listen_node);
		kfree(child_listen_node);
		cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
	}
	spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);

	return ret;
}

/**
 * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
 * @addr: local IPv6 address
 * @vlan_id: vlan id for the given IPv6 address
 * @mac: mac address for the given IPv6 address
 *
 * Returns the net_device of the IPv6 address and also sets the
 * vlan id and mac for that address.
 */
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
	struct net_device *ip_dev = NULL;
	struct in6_addr laddr6;

	if (!IS_ENABLED(CONFIG_IPV6))
		return NULL;
	i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
	if (vlan_id)
		*vlan_id = I40IW_NO_VLAN;
	if (mac)
		eth_zero_addr(mac);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
			if (vlan_id)
				*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
			if (ip_dev->dev_addr && mac)
				ether_addr_copy(mac, ip_dev->dev_addr);
			break;
		}
	}
	rcu_read_unlock();
	return ip_dev;
}

/**
 * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
 * @addr: local IPv4 address
 */
static u16 i40iw_get_vlan_ipv4(u32 *addr)
{
	struct net_device *netdev;
	u16 vlan_id = I40IW_NO_VLAN;

	netdev = ip_dev_find(&init_net, htonl(addr[0]));
	if (netdev) {
		vlan_id = rdma_vlan_dev_vlan_id(netdev);
		dev_put(netdev);
	}
	return vlan_id;
}

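/*
 * Example (illustrative only): for an address configured on a VLAN
 * interface such as eth0.100, ip_dev_find() returns the vlan net_device
 * and rdma_vlan_dev_vlan_id() yields 100; for a non-vlan device it
 * returns 0xFFFF, which is what I40IW_NO_VLAN appears to encode here:
 *
 *	u32 addr[4] = { 0xc0a80101, 0, 0, 0 };	// 192.168.1.1, host order
 *	u16 vlan = i40iw_get_vlan_ipv4(addr);	// 100, or I40IW_NO_VLAN
 *
 * Note the helper converts to network byte order itself, so callers pass
 * host-order addresses as everywhere else in this file.
 */
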
/**
 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv6 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
					      struct i40iw_cm_info *cm_info,
					      struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	enum i40iw_status_code ret = 0;
	struct i40iw_cm_listener *child_listen_node;
	unsigned long flags;

	rtnl_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("idev == NULL\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "IP=%pI6, vlan_id=%d, MAC=%pM\n",
					    &ifp->addr,
					    rdma_vlan_dev_vlan_id(ip_dev),
					    ip_dev->dev_addr);
				child_listen_node =
					kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;

				memcpy(child_listen_node, cm_parent_listen_node,
				       sizeof(*child_listen_node));

				i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
						    ifp->addr.in6_u.u6_addr32);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev, cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL, true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				} else {
					kfree(child_listen_node);
				}
			}
		}
	}
exit:
	rtnl_unlock();
	return ret;
}

/**
 * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv4 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_4(
				struct i40iw_device *iwdev,
				struct i40iw_cm_info *cm_info,
				struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *dev;
	struct in_device *idev;
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = 0;
	unsigned long flags;

	rtnl_lock();
	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			if (!idev)
				continue;
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
					    &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev),
					    dev->dev_addr);
				child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					in_dev_put(idev);
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;
				memcpy(child_listen_node,
				       cm_parent_listen_node,
				       sizeof(*child_listen_node));

				child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev,
							 cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL,
							 true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
				} else {
					kfree(child_listen_node);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
				}
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
exit:
	rtnl_unlock();
	return ret;
}

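/*
 * Example (illustrative sketch): listening on a wildcard address turns one
 * user-visible listener into a parent with one child per local address.
 * For a host with 192.168.1.1 on eth0 and 10.0.0.1 on eth0.100, a listen
 * on 0.0.0.0:4000 produces:
 *
 *	parent (0.0.0.0:4000)
 *	  +-- child 192.168.1.1:4000, vlan I40IW_NO_VLAN, qhash added
 *	  +-- child 10.0.0.1:4000,    vlan 100,           qhash added
 *
 * i40iw_del_multiple_qhash() above walks the same child list when the
 * parent listener is destroyed.
 */
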
/**
 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @free_hanging_nodes: to free associated cm_nodes
 * @apbvt_del: flag to delete the apbvt
 */
static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
				   struct i40iw_cm_listener *listener,
				   int free_hanging_nodes, bool apbvt_del)
{
	int ret = -EINVAL;
	int err = 0;
	struct list_head *list_pos;
	struct list_head *list_temp;
	struct i40iw_cm_node *cm_node;
	struct list_head reset_list;
	struct i40iw_cm_info nfo;
	struct i40iw_cm_node *loopback;
	enum i40iw_cm_node_state old_state;
	unsigned long flags;

	/* free non-accelerated child nodes for this listener */
	INIT_LIST_HEAD(&reset_list);
	if (free_hanging_nodes) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
			cm_node = container_of(list_pos, struct i40iw_cm_node, list);
			if ((cm_node->listener == listener) && !cm_node->accelerated) {
				atomic_inc(&cm_node->ref_count);
				list_add(&cm_node->reset_entry, &reset_list);
			}
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}

	list_for_each_safe(list_pos, list_temp, &reset_list) {
		cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
		loopback = cm_node->loopbackpartner;
		if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (!loopback) {
				i40iw_cleanup_retrans_entry(cm_node);
				err = i40iw_send_reset(cm_node);
				if (err) {
					cm_node->state = I40IW_CM_STATE_CLOSED;
					i40iw_pr_err("send reset\n");
				} else {
					old_state = cm_node->state;
					cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
					if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
						i40iw_rem_ref_cm_node(cm_node);
				}
			} else {
				struct i40iw_cm_event event;

				event.cm_node = loopback;
				memcpy(event.cm_info.rem_addr,
				       loopback->rem_addr, sizeof(event.cm_info.rem_addr));
				memcpy(event.cm_info.loc_addr,
				       loopback->loc_addr, sizeof(event.cm_info.loc_addr));
				event.cm_info.rem_port = loopback->rem_port;
				event.cm_info.loc_port = loopback->loc_port;
				event.cm_info.cm_id = loopback->cm_id;
				event.cm_info.ipv4 = loopback->ipv4;
				atomic_inc(&loopback->ref_count);
				loopback->state = I40IW_CM_STATE_CLOSED;
				i40iw_event_connect_error(&event);
				cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
				i40iw_rem_ref_cm_node(cm_node);
			}
		}
	}

	if (!atomic_dec_return(&listener->ref_count)) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_del(&listener->list);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

		if (listener->iwdev) {
			if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
				i40iw_manage_apbvt(listener->iwdev,
						   listener->loc_port,
						   I40IW_MANAGE_APBVT_DEL);

			memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
			nfo.loc_port = listener->loc_port;
			nfo.ipv4 = listener->ipv4;
			nfo.vlan_id = listener->vlan_id;
			nfo.user_pri = listener->user_pri;

			if (!list_empty(&listener->child_listen_list)) {
				i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
			} else {
				if (listener->qhash_set)
					i40iw_manage_qhash(listener->iwdev,
							   &nfo,
							   I40IW_QHASH_TYPE_TCP_SYN,
							   I40IW_QHASH_MANAGE_TYPE_DELETE,
							   NULL,
							   false);
			}
		}

		cm_core->stats_listen_destroyed++;
		kfree(listener);
		cm_core->stats_listen_nodes_destroyed++;
		listener = NULL;
		ret = 0;
	}

	if (listener) {
		if (atomic_read(&listener->pend_accepts_cnt) > 0)
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s: listener (%p) pending accepts=%u\n",
				    __func__,
				    listener,
				    atomic_read(&listener->pend_accepts_cnt));
	}

	return ret;
}

/**
 * i40iw_cm_del_listen - delete a listener
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @apbvt_del: flag to delete apbvt
 */
static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_listener *listener,
			       bool apbvt_del)
{
	listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
	listener->cm_id = NULL;	/* going to be destroyed pretty soon */
	return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
}

/**
 * i40iw_addr_resolve_neigh - resolve neighbor address
 * @iwdev: iwarp device structure
 * @src_ip: local ip address
 * @dst_ip: remote ip address
 * @arpindex: if there is an arp entry
 */
static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
				    u32 src_ip,
				    u32 dst_ip,
				    int arpindex)
{
	struct rtable *rt;
	struct neighbour *neigh;
	int rc = arpindex;
	struct net_device *netdev = iwdev->netdev;
	__be32 dst_ipaddr = htonl(dst_ip);
	__be32 src_ipaddr = htonl(src_ip);

	rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
	if (IS_ERR(rt)) {
		i40iw_pr_err("ip_route_output\n");
		return rc;
	}

	if (netif_is_bond_slave(netdev))
		netdev = netdev_master_upper_dev_get(netdev);

	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);

	rcu_read_lock();
	if (neigh) {
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
						     neigh->ha))
					/* Mac address same as arp table */
					goto resolve_neigh_exit;
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       &dst_ip,
						       true,
						       I40IW_ARP_DELETE);
			}

			i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
		} else {
			neigh_event_send(neigh, NULL);
		}
	}
resolve_neigh_exit:

	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);

	ip_rt_put(rt);
	return rc;
}

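/*
 * Example (illustrative only): on a neighbour-cache miss the function does
 * not block waiting for ARP; it kicks neighbour discovery via
 * neigh_event_send() and returns the old (possibly invalid) arpindex.  A
 * caller that needs the entry simply fails when the index stays negative:
 *
 *	arpindex = i40iw_addr_resolve_neigh(iwdev, src_ip, dst_ip, oldarpindex);
 *	if (arpindex < 0)
 *		return NULL;	// neighbour not resolved yet
 *
 * This mirrors the handling in i40iw_make_cm_node() below.
 */
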
/**
 * i40iw_get_dst_ipv6 - get the ipv6 route for a source/destination pair
 * @src_addr: local ipv6 address
 * @dst_addr: remote ipv6 address
 */
static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
					    struct sockaddr_in6 *dst_addr)
{
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	fl6.saddr = src_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	dst = ip6_route_output(&init_net, NULL, &fl6);
	return dst;
}

/**
 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @iwdev: iwarp device structure
 * @src: local ipv6 address
 * @dest: remote ipv6 address
 * @arpindex: if there is an arp entry
 */
static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
					 u32 *src,
					 u32 *dest,
					 int arpindex)
{
	struct neighbour *neigh;
	int rc = arpindex;
	struct net_device *netdev = iwdev->netdev;
	struct dst_entry *dst;
	struct sockaddr_in6 dst_addr;
	struct sockaddr_in6 src_addr;

	memset(&dst_addr, 0, sizeof(dst_addr));
	dst_addr.sin6_family = AF_INET6;
	i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
	memset(&src_addr, 0, sizeof(src_addr));
	src_addr.sin6_family = AF_INET6;
	i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
	dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
	if (!dst || dst->error) {
		if (dst) {
			i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
				     dst->error);
			dst_release(dst);
		}
		return rc;
	}

	if (netif_is_bond_slave(netdev))
		netdev = netdev_master_upper_dev_get(netdev);

	neigh = dst_neigh_lookup(dst, &dst_addr);

	rcu_read_lock();
	if (neigh) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal
				    (iwdev->arp_table[arpindex].mac_addr,
				     neigh->ha)) {
					/* Mac address same as in arp table */
					goto resolve_neigh_exit6;
				}
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       dest,
						       false,
						       I40IW_ARP_DELETE);
			}
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       dest,
					       false,
					       I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev,
					     dest,
					     false,
					     NULL,
					     I40IW_ARP_RESOLVE);
		} else {
			neigh_event_send(neigh, NULL);
		}
	}

resolve_neigh_exit6:
	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);
	dst_release(dst);
	return rc;
}

/**
 * i40iw_ipv4_is_loopback - check if loopback
 * @loc_addr: local addr to compare
 * @rem_addr: remote address
 */
static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
{
	return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
}

/**
 * i40iw_ipv6_is_loopback - check if loopback
 * @loc_addr: local addr to compare
 * @rem_addr: remote address
 */
static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
{
	struct in6_addr raddr6;

	i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
	return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
}

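/*
 * Example (illustrative only): both helpers treat a connection to any
 * local address as loopback, not just 127.0.0.1/::1.  With a local
 * address of 192.168.1.1 (host byte order, as used throughout this file):
 *
 *	i40iw_ipv4_is_loopback(0xc0a80101, 0x7f000001)	-> true (127.0.0.1)
 *	i40iw_ipv4_is_loopback(0xc0a80101, 0xc0a80101)	-> true (self)
 *	i40iw_ipv4_is_loopback(0xc0a80101, 0xc0a80102)	-> false
 *
 * The htonl() above exists only because ipv4_is_loopback() expects a
 * network-byte-order address.
 */
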
/**
 * i40iw_make_cm_node - create a new instance of a cm node
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 * @listener: passive connection's listener
 */
static struct i40iw_cm_node *i40iw_make_cm_node(
				   struct i40iw_cm_core *cm_core,
				   struct i40iw_device *iwdev,
				   struct i40iw_cm_info *cm_info,
				   struct i40iw_cm_listener *listener)
{
	struct i40iw_cm_node *cm_node;
	struct timespec ts;
	int oldarpindex;
	int arpindex;
	struct net_device *netdev = iwdev->netdev;

	/* create an hte and cm_node for this instance */
	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
	if (!cm_node)
		return NULL;

	/* set our node specific transport info */
	cm_node->ipv4 = cm_info->ipv4;
	cm_node->vlan_id = cm_info->vlan_id;
	if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
		cm_node->vlan_id = 0;
	cm_node->tos = cm_info->tos;
	cm_node->user_pri = cm_info->user_pri;
	if (listener) {
		if (listener->tos != cm_info->tos)
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
				    "application TOS[%d] and remote client TOS[%d] mismatch\n",
				    listener->tos, cm_info->tos);
		cm_node->tos = max(listener->tos, cm_info->tos);
		cm_node->user_pri = rt_tos2priority(cm_node->tos);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
			    cm_node->tos, cm_node->user_pri);
	}
	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
	cm_node->loc_port = cm_info->loc_port;
	cm_node->rem_port = cm_info->rem_port;

	cm_node->mpa_frame_rev = iwdev->mpa_version;
	cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
	cm_node->ird_size = I40IW_MAX_IRD_SIZE;
	cm_node->ord_size = I40IW_MAX_ORD_SIZE;

	cm_node->listener = listener;
	cm_node->cm_id = cm_info->cm_id;
	ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
	spin_lock_init(&cm_node->retrans_list_lock);
	atomic_set(&cm_node->ref_count, 1);

	/* associate our parent CM core */
	cm_node->cm_core = cm_core;
	cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
	cm_node->tcp_cntxt.rcv_wnd =
			I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
	ts = current_kernel_time();
	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
	cm_node->tcp_cntxt.mss = iwdev->vsi.mss;

	cm_node->iwdev = iwdev;
	cm_node->dev = &iwdev->sc_dev;

	if ((cm_node->ipv4 &&
	     i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
						      cm_node->rem_addr))) {
		arpindex = i40iw_arp_table(iwdev,
					   cm_node->rem_addr,
					   false,
					   NULL,
					   I40IW_ARP_RESOLVE);
	} else {
		oldarpindex = i40iw_arp_table(iwdev,
					      cm_node->rem_addr,
					      false,
					      NULL,
					      I40IW_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = i40iw_addr_resolve_neigh(iwdev,
							    cm_info->loc_addr[0],
							    cm_info->rem_addr[0],
							    oldarpindex);
		else if (IS_ENABLED(CONFIG_IPV6))
			arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
								 cm_info->loc_addr,
								 cm_info->rem_addr,
								 oldarpindex);
		else
			arpindex = -EINVAL;
	}
	if (arpindex < 0) {
		i40iw_pr_err("cm_node arpindex\n");
		kfree(cm_node);
		return NULL;
	}
	ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
	i40iw_add_hte_node(cm_core, cm_node);
	cm_core->stats_nodes_created++;
	return cm_node;
}

/**
 * i40iw_rem_ref_cm_node - destroy an instance of a cm node
 * @cm_node: connection's node
 */
static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
{
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_info nfo;
	unsigned long flags;

	spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
	if (atomic_dec_return(&cm_node->ref_count)) {
		spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
		return;
	}
	list_del(&cm_node->list);
	spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);

	/* if the node is destroyed before connection was accelerated */
	if (!cm_node->accelerated && cm_node->accept_pend) {
		pr_err("node destroyed before established\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
	}
	if (cm_node->close_entry)
		i40iw_handle_close_entry(cm_node, 0);
	if (cm_node->listener) {
		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
	} else {
		if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
		    cm_node->apbvt_set) {
			i40iw_manage_apbvt(cm_node->iwdev,
					   cm_node->loc_port,
					   I40IW_MANAGE_APBVT_DEL);
			i40iw_get_addr_info(cm_node, &nfo);
			if (cm_node->qhash_set) {
				i40iw_manage_qhash(cm_node->iwdev,
						   &nfo,
						   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
						   I40IW_QHASH_MANAGE_TYPE_DELETE,
						   NULL,
						   false);
				cm_node->qhash_set = 0;
			}
		}
	}

	iwqp = cm_node->iwqp;
	if (iwqp) {
		iwqp->cm_node = NULL;
		i40iw_rem_ref(&iwqp->ibqp);
		cm_node->iwqp = NULL;
	} else if (cm_node->qhash_set) {
		i40iw_get_addr_info(cm_node, &nfo);
		i40iw_manage_qhash(cm_node->iwdev,
				   &nfo,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);
		cm_node->qhash_set = 0;
	}

	cm_node->cm_core->stats_nodes_destroyed++;
	kfree(cm_node);
}

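/*
 * Example (illustrative sketch): a cm_node starts life with ref_count = 1
 * (set in i40iw_make_cm_node()), and every list that may touch it
 * asynchronously takes its own reference, e.g. the timer tick:
 *
 *	atomic_inc(&cm_node->ref_count);	// before queuing on timer_list
 *	// ... service the close/retransmit entry ...
 *	i40iw_rem_ref_cm_node(cm_node);		// after servicing the entry
 *
 * The node is unlinked and freed only when the count drops to zero, and
 * the decrement happens under ht_lock, so a lookup can never return a
 * node that is mid-destruction.
 */
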
/**
 * i40iw_handle_fin_pkt - FIN packet received
 * @cm_node: connection's node
 */
static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
{
	int ret;

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_MPAREJ_RCVD:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSING;
		i40iw_send_ack(cm_node);
		/*
		 * Wait for ACK as this is simultaneous close.
		 * After we receive ACK, do not send anything.
		 * Just rm the node.
		 */
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_TIME_WAIT;
		i40iw_send_ack(cm_node);
		ret = i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
		if (ret)
			i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
		break;
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
		break;
	}
}

/**
 * i40iw_handle_rst_pkt - process received RST packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	i40iw_cleanup_retrans_entry(cm_node);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		switch (cm_node->mpa_frame_rev) {
		case IETF_MPA_V2:
			cm_node->mpa_frame_rev = IETF_MPA_V1;
			/* send a syn and goto syn sent state */
			cm_node->state = I40IW_CM_STATE_SYN_SENT;
			if (i40iw_send_syn(cm_node, 0))
				i40iw_active_open_err(cm_node, false);
			break;
		case IETF_MPA_V1:
		default:
			i40iw_active_open_err(cm_node, false);
			break;
		}
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		atomic_add_return(1, &cm_node->passive_state);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_LISTENING:
		i40iw_pr_err("Bad state state = %d\n", cm_node->state);
		i40iw_passive_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		i40iw_active_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_CLOSED:
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_LAST_ACK:
		cm_node->cm_id->rem_ref(cm_node->cm_id);
		/* fall through */
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	default:
		break;
	}
}

/**
 * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	int ret;
	int datasize = rbuf->datalen;
	u8 *dataloc = rbuf->data;

	enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
	u32 res_type;

	ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
	if (ret) {
		if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
			i40iw_active_open_err(cm_node, true);
		else
			i40iw_passive_open_err(cm_node, true);
		return;
	}

	switch (cm_node->state) {
	case I40IW_CM_STATE_ESTABLISHED:
		if (res_type == I40IW_MPA_REQUEST_REJECT)
			i40iw_pr_err("state for reject\n");
		cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
		type = I40IW_CM_EVENT_MPA_REQ;
		i40iw_send_ack(cm_node);	/* ACK received MPA request */
		atomic_set(&cm_node->passive_state,
			   I40IW_PASSIVE_STATE_INDICATED);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		if (res_type == I40IW_MPA_REQUEST_REJECT) {
			type = I40IW_CM_EVENT_MPA_REJECT;
			cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
		} else {
			type = I40IW_CM_EVENT_CONNECTED;
			cm_node->state = I40IW_CM_STATE_OFFLOADED;
		}
		i40iw_send_ack(cm_node);
		break;
	default:
		pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
		break;
	}
	i40iw_create_event(cm_node, type);
}

/**
 * i40iw_indicate_pkt_err - Send up err event to cm
 * @cm_node: connection's node
 */
static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
{
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_active_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		break;
	}
}

/**
 * i40iw_check_syn - Check for error on received syn ack
 * @cm_node: connection's node
 * @tcph: pointer tcp header
 */
static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
{
	int err = 0;

	if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
		err = 1;
		i40iw_active_open_err(cm_node, true);
	}
	return err;
}

/**
 * i40iw_check_seq - check seq numbers if OK
 * @cm_node: connection's node
 * @tcph: pointer tcp header
 */
static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
{
	int err = 0;
	u32 seq;
	u32 ack_seq;
	u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
	u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
	u32 rcv_wnd;

	seq = ntohl(tcph->seq);
	ack_seq = ntohl(tcph->ack_seq);
	rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
	if (ack_seq != loc_seq_num)
		err = -1;
	else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
		err = -1;
	if (err) {
		i40iw_pr_err("seq number\n");
		i40iw_indicate_pkt_err(cm_node);
	}
	return err;
}

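/*
 * Example (illustrative only): between(seq, rcv_nxt, rcv_nxt + rcv_wnd)
 * is a modular (unsigned) comparison, so it stays correct across 32-bit
 * sequence-number wraparound.  With rcv_nxt = 0xfffffff0 and
 * rcv_wnd = 0x100:
 *
 *	seq = 0xfffffff8  -> in window (accepted)
 *	seq = 0x00000020  -> in window (wrapped, still accepted)
 *	seq = 0x00001000  -> out of window (pkt err indicated above)
 *
 * which is why plain '<'/'>' comparisons cannot be used here.
 */
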
/**
 * i40iw_handle_syn_pkt - is for Passive node
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;
	struct i40iw_cm_info nfo;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		/* Rcvd syn on active open connection */
		i40iw_active_open_err(cm_node, 1);
		break;
	case I40IW_CM_STATE_LISTENING:
		/* Passive OPEN */
		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
		    cm_node->listener->backlog) {
			cm_node->cm_core->stats_backlog_drops++;
			i40iw_passive_open_err(cm_node, false);
			break;
		}
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret) {
			i40iw_passive_open_err(cm_node, false);
			/* drop pkt */
			break;
		}
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		cm_node->accept_pend = 1;
		atomic_inc(&cm_node->listener->pend_accepts_cnt);

		cm_node->state = I40IW_CM_STATE_SYN_RCVD;
		i40iw_get_addr_info(cm_node, &nfo);
		ret = i40iw_manage_qhash(cm_node->iwdev,
					 &nfo,
					 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					 I40IW_QHASH_MANAGE_TYPE_ADD,
					 (void *)cm_node,
					 false);
		cm_node->qhash_set = true;
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		break;
	}
}

/**
 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
				    struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		/* active open */
		if (i40iw_check_syn(cm_node, tcph)) {
			i40iw_pr_err("check syn fail\n");
			return;
		}
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		/* setup options */
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p tcp_options failed\n",
				    cm_node);
			break;
		}
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		i40iw_send_ack(cm_node);	/* ACK for the syn_ack */
		ret = i40iw_send_mpa_request(cm_node);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p i40iw_send_mpa_request failed\n",
				    cm_node);
			break;
		}
		cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_LISTENING:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_MPAREQ_SENT:
	default:
		break;
	}
}

/**
 * i40iw_handle_ack_pkt - process packet with ACK
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
				struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 inc_sequence;
	int ret = 0;
	int optionsize;
	u32 datasize = rbuf->datalen;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);

	if (i40iw_check_seq(cm_node, tcph))
		return -EINVAL;

	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
		i40iw_cleanup_retrans_entry(cm_node);
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret)
			break;
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		cm_node->state = I40IW_CM_STATE_ESTABLISHED;
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_ESTABLISHED:
		i40iw_cleanup_retrans_entry(cm_node);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_LISTENING:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		if (!cm_node->accept_pend)
			cm_node->cm_id->rem_ref(cm_node->cm_id);
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
		break;
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		i40iw_cleanup_retrans_entry(cm_node);
		break;
	}
	return ret;
}

/**
 * i40iw_process_packet - process cm packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 fin_set = 0;
	int ret;

	if (tcph->rst) {
		pkt_type = I40IW_PKT_TYPE_RST;
	} else if (tcph->syn) {
		pkt_type = I40IW_PKT_TYPE_SYN;
		if (tcph->ack)
			pkt_type = I40IW_PKT_TYPE_SYNACK;
	} else if (tcph->ack) {
		pkt_type = I40IW_PKT_TYPE_ACK;
	}
	if (tcph->fin)
		fin_set = 1;

	switch (pkt_type) {
	case I40IW_PKT_TYPE_SYN:
		i40iw_handle_syn_pkt(cm_node, rbuf);
		break;
	case I40IW_PKT_TYPE_SYNACK:
		i40iw_handle_synack_pkt(cm_node, rbuf);
		break;
	case I40IW_PKT_TYPE_ACK:
		ret = i40iw_handle_ack_pkt(cm_node, rbuf);
		if (fin_set && !ret)
			i40iw_handle_fin_pkt(cm_node);
		break;
	case I40IW_PKT_TYPE_RST:
		i40iw_handle_rst_pkt(cm_node, rbuf);
		break;
	default:
		if (fin_set &&
		    (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
			i40iw_handle_fin_pkt(cm_node);
		break;
	}
}

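/*
 * Example (illustrative only): flag-to-type mapping used above, in
 * priority order (RST wins over SYN, SYN+ACK is distinguished from SYN):
 *
 *	RST=1		-> I40IW_PKT_TYPE_RST
 *	SYN=1, ACK=0	-> I40IW_PKT_TYPE_SYN
 *	SYN=1, ACK=1	-> I40IW_PKT_TYPE_SYNACK
 *	SYN=0, ACK=1	-> I40IW_PKT_TYPE_ACK
 *
 * FIN is tracked separately: for ACK packets it is handled only if the
 * ACK processing succeeds; for bare FINs it is handled only after the
 * sequence-number check passes.
 */
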
/**
 * i40iw_make_listen_node - create a listen node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 */
static struct i40iw_cm_listener *i40iw_make_listen_node(
					struct i40iw_cm_core *cm_core,
					struct i40iw_device *iwdev,
					struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_listener *listener;
	unsigned long flags;

	/* cannot have multiple matching listeners */
	listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
				       cm_info->loc_port,
				       cm_info->vlan_id,
				       I40IW_CM_LISTENER_EITHER_STATE);
	if (listener &&
	    (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
		atomic_dec(&listener->ref_count);
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "Not creating listener since it already exists\n");
		return NULL;
	}

	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
		if (!listener)
			return NULL;
		cm_core->stats_listen_nodes_created++;
		memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
		listener->loc_port = cm_info->loc_port;

		INIT_LIST_HEAD(&listener->child_listen_list);

		atomic_set(&listener->ref_count, 1);
	} else {
		listener->reused_node = 1;
	}

	listener->cm_id = cm_info->cm_id;
	listener->ipv4 = cm_info->ipv4;
	listener->vlan_id = cm_info->vlan_id;
	atomic_set(&listener->pend_accepts_cnt, 0);
	listener->cm_core = cm_core;
	listener->iwdev = iwdev;

	listener->backlog = cm_info->backlog;
	listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;

	if (!listener->reused_node) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_add(&listener->list, &cm_core->listen_nodes);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}

	return listener;
}

/**
 * i40iw_create_cm_node - make a connection node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @private_data_len: length of private data for mpa request
 * @private_data: pointer to private data for connection
 * @cm_info: quad info for connection
 */
static struct i40iw_cm_node *i40iw_create_cm_node(
				struct i40iw_cm_core *cm_core,
				struct i40iw_device *iwdev,
				u16 private_data_len,
				void *private_data,
				struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *loopback_remotelistener;
	struct i40iw_cm_node *loopback_remotenode;
	struct i40iw_cm_info loopback_cm_info;

	/* create a CM connection node */
	cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
	if (!cm_node)
		return ERR_PTR(-ENOMEM);
	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;

	if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
		loopback_remotelistener = i40iw_find_listener(
						cm_core,
						cm_info->rem_addr,
						cm_node->rem_port,
						cm_node->vlan_id,
						I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!loopback_remotelistener) {
			i40iw_rem_ref_cm_node(cm_node);
			return ERR_PTR(-ECONNREFUSED);
		} else {
			loopback_cm_info = *cm_info;
			loopback_cm_info.loc_port = cm_info->rem_port;
			loopback_cm_info.rem_port = cm_info->loc_port;
			loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
			loopback_cm_info.ipv4 = cm_info->ipv4;
			loopback_remotenode = i40iw_make_cm_node(cm_core,
								 iwdev,
								 &loopback_cm_info,
								 loopback_remotelistener);
			if (!loopback_remotenode) {
				i40iw_rem_ref_cm_node(cm_node);
				return ERR_PTR(-ENOMEM);
			}
			cm_core->stats_loopbacks++;
			loopback_remotenode->loopbackpartner = cm_node;
			loopback_remotenode->tcp_cntxt.rcv_wscale =
				I40IW_CM_DEFAULT_RCV_WND_SCALE;
			cm_node->loopbackpartner = loopback_remotenode;
			memcpy(loopback_remotenode->pdata_buf, private_data,
			       private_data_len);
			loopback_remotenode->pdata.size = private_data_len;

			cm_node->state = I40IW_CM_STATE_OFFLOADED;
			cm_node->tcp_cntxt.rcv_nxt =
				loopback_remotenode->tcp_cntxt.loc_seq_num;
			loopback_remotenode->tcp_cntxt.rcv_nxt =
				cm_node->tcp_cntxt.loc_seq_num;
			cm_node->tcp_cntxt.max_snd_wnd =
				loopback_remotenode->tcp_cntxt.rcv_wnd;
			loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
			loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
			loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
			loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
			i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
		}
		return cm_node;
	}

	cm_node->pdata.size = private_data_len;
	cm_node->pdata.addr = cm_node->pdata_buf;

	memcpy(cm_node->pdata_buf, private_data, private_data_len);

	cm_node->state = I40IW_CM_STATE_SYN_SENT;
	return cm_node;
}

/**
 * i40iw_cm_reject - reject and teardown a connection
 * @cm_node: connection's node
 * @pdata: ptr to private data for reject
 * @plen: size of private data
 */
static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
{
	int ret = 0;
	int err;
	int passive_state;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_cm_node *loopback = cm_node->loopbackpartner;

	if (cm_node->tcp_cntxt.client)
		return ret;
	i40iw_cleanup_retrans_entry(cm_node);

	if (!loopback) {
		passive_state = atomic_add_return(1, &cm_node->passive_state);
		if (passive_state == I40IW_SEND_RESET_EVENT) {
			cm_node->state = I40IW_CM_STATE_CLOSED;
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
				i40iw_rem_ref_cm_node(cm_node);
			} else {
				ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
				if (ret) {
					cm_node->state = I40IW_CM_STATE_CLOSED;
					err = i40iw_send_reset(cm_node);
					if (err)
						i40iw_pr_err("send reset failed\n");
				} else {
					cm_id->add_ref(cm_id);
				}
			}
		}
	} else {
		cm_node->cm_id = NULL;
		if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
			i40iw_rem_ref_cm_node(cm_node);
			i40iw_rem_ref_cm_node(loopback);
		} else {
			ret = i40iw_send_cm_event(loopback,
						  loopback->cm_id,
						  IW_CM_EVENT_CONNECT_REPLY,
						  -ECONNREFUSED);
			i40iw_rem_ref_cm_node(cm_node);
			loopback->state = I40IW_CM_STATE_CLOSING;

			cm_id = loopback->cm_id;
			i40iw_rem_ref_cm_node(loopback);
			cm_id->rem_ref(cm_id);
		}
	}

	return ret;
}


/**
 * i40iw_cm_close - close of cm connection
 * @cm_node: connection's node
 */
static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
{
	int ret = 0;

	if (!cm_node)
		return -EINVAL;
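	/*
	 * Tear down according to connection state: handshake and MPA
	 * states get an immediate RST, half-closed states advance the
	 * FIN sequence, and terminal states just drop the node reference.
	 */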
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_ACCEPTING:
	case I40IW_CM_STATE_MPAREQ_SENT:
	case I40IW_CM_STATE_MPAREQ_RCVD:
		i40iw_cleanup_retrans_entry(cm_node);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSE_WAIT:
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_TIME_WAIT:
	case I40IW_CM_STATE_CLOSING:
		ret = -1;
		break;
	case I40IW_CM_STATE_LISTENING:
		i40iw_cleanup_retrans_entry(cm_node);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_MPAREJ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_INITED:
	case I40IW_CM_STATE_CLOSED:
	case I40IW_CM_STATE_LISTENER_DESTROYED:
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		if (cm_node->send_entry)
			i40iw_pr_err("send_entry\n");
		i40iw_rem_ref_cm_node(cm_node);
		break;
	}
	return ret;
}

/**
 * i40iw_receive_ilq - receive an Ethernet packet and process it
 * through CM
 * @vsi: pointer to the vsi structure
 * @rbuf: receive buffer
 */
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *listener;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	struct i40iw_cm_info cm_info;
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct vlan_ethhdr *ethh;
	u16 vtag;

	/* if vlan, then maclen = 18 else 14 */
	iph = (struct iphdr *)rbuf->iph;
	memset(&cm_info, 0, sizeof(cm_info));

	i40iw_debug_buf(dev,
			I40IW_DEBUG_ILQ,
			"RECEIVE ILQ BUFFER",
			rbuf->mem.va,
			rbuf->totallen);
	ethh = (struct vlan_ethhdr *)rbuf->mem.va;

	if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
		vtag = ntohs(ethh->h_vlan_TCI);
		cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		cm_info.vlan_id = vtag & VLAN_VID_MASK;
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "%s vlan_id=%d\n",
			    __func__,
			    cm_info.vlan_id);
	} else {
		cm_info.vlan_id = I40IW_NO_VLAN;
	}
	tcph = (struct tcphdr *)rbuf->tcph;
	if (rbuf->ipv4) {
		cm_info.loc_addr[0] = ntohl(iph->daddr);
		cm_info.rem_addr[0] = ntohl(iph->saddr);
		cm_info.ipv4 = true;
		cm_info.tos = iph->tos;
	} else {
		ip6h = (struct ipv6hdr *)rbuf->iph;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(cm_info.rem_addr,
				    ip6h->saddr.in6_u.u6_addr32);
		cm_info.ipv4 = false;
		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
	}
	cm_info.loc_port = ntohs(tcph->dest);
	cm_info.rem_port = ntohs(tcph->source);
	cm_node = i40iw_find_node(cm_core,
				  cm_info.rem_port,
				  cm_info.rem_addr,
				  cm_info.loc_port,
				  cm_info.loc_addr,
				  true);
	if (!cm_node) {
		/* only packets for a PASSIVE open (SYN only) are accepted */
		if (!tcph->syn || tcph->ack)
			return;
		listener =
		    i40iw_find_listener(cm_core,
					cm_info.loc_addr,
					cm_info.loc_port,
					cm_info.vlan_id,
					I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!listener) {
			cm_info.cm_id = NULL;
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s no listener found\n",
				    __func__);
			return;
		}
		cm_info.cm_id = listener->cm_id;
		cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
		if (!cm_node) {
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s allocate node failed\n",
				    __func__);
			atomic_dec(&listener->ref_count);
			return;
		}
		if (!tcph->rst && !tcph->fin) {
			cm_node->state = I40IW_CM_STATE_LISTENING;
		} else {
			i40iw_rem_ref_cm_node(cm_node);
			return;
		}
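		/*
		 * Take a reference on the newly created node to balance
		 * the i40iw_rem_ref_cm_node() at the end of this function;
		 * i40iw_find_node() already returns existing nodes with a
		 * reference held.
		 */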
		atomic_inc(&cm_node->ref_count);
	} else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
		i40iw_rem_ref_cm_node(cm_node);
		return;
	}
	i40iw_process_packet(cm_node, rbuf);
	i40iw_rem_ref_cm_node(cm_node);
}

/**
 * i40iw_setup_cm_core - allocate a top level instance of a cm
 * core
 * @iwdev: iwarp device structure
 */
void i40iw_setup_cm_core(struct i40iw_device *iwdev)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;

	cm_core->iwdev = iwdev;
	cm_core->dev = &iwdev->sc_dev;

	INIT_LIST_HEAD(&cm_core->connected_nodes);
	INIT_LIST_HEAD(&cm_core->listen_nodes);

	setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
		    (unsigned long)cm_core);

	spin_lock_init(&cm_core->ht_lock);
	spin_lock_init(&cm_core->listen_list_lock);
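	/*
	 * Ordered workqueues execute one work item at a time, which
	 * serializes CM event delivery and disconnect handling;
	 * WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure.
	 */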
	cm_core->event_wq = alloc_ordered_workqueue("iwewq",
						    WQ_MEM_RECLAIM);
	cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
						      WQ_MEM_RECLAIM);
}

/**
 * i40iw_cleanup_cm_core - deallocate a top level instance of a
 * cm core
 * @cm_core: cm's core
 */
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
{
	unsigned long flags;

	if (!cm_core)
		return;
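	/* stop the TCP retransmit timer before destroying the work queues */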
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	if (timer_pending(&cm_core->tcp_timer))
		del_timer_sync(&cm_core->tcp_timer);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	destroy_workqueue(cm_core->event_wq);
	destroy_workqueue(cm_core->disconn_wq);
}

/**
 * i40iw_init_tcp_ctx - setup qp context
 * @cm_node: connection's node
 * @tcp_info: offload info for tcp
 * @iwqp: associate qp for the connection
 */
static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
			       struct i40iw_tcp_offload_info *tcp_info,
			       struct i40iw_qp *iwqp)
{
	tcp_info->ipv4 = cm_node->ipv4;
	tcp_info->drop_ooo_seg = true;
	tcp_info->wscale = true;
	tcp_info->ignore_tcp_opt = true;
	tcp_info->ignore_tcp_uns_opt = true;
	tcp_info->no_nagle = false;

	tcp_info->ttl = I40IW_DEFAULT_TTL;
	tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
	tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
	tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;

	tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
	tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;

	tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
	tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);

	tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
	tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
	tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
					cm_node->tcp_cntxt.rcv_wscale);
	tcp_info->flow_label = 0;
	tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
	if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
		tcp_info->insert_vlan_tag = true;
		tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
	}
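	/*
	 * cm_node addresses and ports are kept in host order; convert
	 * them to little endian for the HW context and resolve the ARP
	 * table index for the destination address.
	 */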
	if (cm_node->ipv4) {
		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
		tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);

		tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
		tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
		tcp_info->arp_idx =
			cpu_to_le16((u16)i40iw_arp_table(
							 iwqp->iwdev,
							 &tcp_info->dest_ip_addr3,
							 true,
							 NULL,
							 I40IW_ARP_RESOLVE));
	} else {
		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
		tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);

		tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
		tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
		tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
		tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
		tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
		tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
		tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
		tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
		tcp_info->arp_idx =
			cpu_to_le16((u16)i40iw_arp_table(
							 iwqp->iwdev,
							 &tcp_info->dest_ip_addr0,
							 false,
							 NULL,
							 I40IW_ARP_RESOLVE));
	}
}

/**
 * i40iw_cm_init_tsa_conn - setup qp for RTS
 * @iwqp: associate qp for the connection
 * @cm_node: connection's node
 */
static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
				   struct i40iw_cm_node *cm_node)
{
	struct i40iw_tcp_offload_info tcp_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;

	memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
	iwarp_info = &iwqp->iwarp_info;
	ctx_info = &iwqp->ctx_info;

	ctx_info->tcp_info = &tcp_info;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;

	iwarp_info->ord_size = cm_node->ord_size;
	iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
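	/* an ORD of 1 is rounded up to 2, presumably a HW minimum */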
	if (iwarp_info->ord_size == 1)
		iwarp_info->ord_size = 2;

	iwarp_info->rd_enable = true;
	iwarp_info->rdmap_ver = 1;
	iwarp_info->ddp_ver = 1;

	iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;

	ctx_info->tcp_info_valid = true;
	ctx_info->iwarp_info_valid = true;
	ctx_info->add_to_qoslist = true;
	ctx_info->user_pri = cm_node->user_pri;

	i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
	if (cm_node->snd_mark_en) {
		iwarp_info->snd_mark_en = true;
		iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
				SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
	}

	cm_node->state = I40IW_CM_STATE_OFFLOADED;
	tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
	tcp_info.tos = cm_node->tos;

	dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);

	/* once tcp_info is set, no need to do it again */
	ctx_info->tcp_info_valid = false;
	ctx_info->iwarp_info_valid = false;
	ctx_info->add_to_qoslist = false;
}

/**
 * i40iw_cm_disconn - when a connection is being closed
 * @iwqp: associate qp for the connection
 */
void i40iw_cm_disconn(struct i40iw_qp *iwqp)
{
	struct disconn_work *work;
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;	/* Timer will clean up */

	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
			    "%s qp_id %d is already freed\n",
			    __func__, iwqp->ibqp.qp_num);
		kfree(work);
		return;
	}
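	/*
	 * Take a QP reference while still under qptable_lock so the
	 * disconnect worker can safely use iwqp after the lock drops;
	 * i40iw_disconnect_worker() releases it.
	 */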
	i40iw_add_ref(&iwqp->ibqp);
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);

	work->iwqp = iwqp;
	INIT_WORK(&work->work, i40iw_disconnect_worker);
	queue_work(cm_core->disconn_wq, &work->work);
}

/**
 * i40iw_qp_disconnect - free qp and close cm
 * @iwqp: associate qp for the connection
 */
static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
{
	struct i40iw_device *iwdev;
	struct i40iw_ib_device *iwibdev;

	iwdev = to_iwdev(iwqp->ibqp.device);
	if (!iwdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return;
	}

	iwibdev = iwdev->iwibdev;

	if (iwqp->active_conn) {
		/* indicate this connection is NOT active */
		iwqp->active_conn = 0;
	} else {
		/* Need to free the Last Streaming Mode Message */
		if (iwqp->ietf_mem.va) {
			if (iwqp->lsmm_mr)
				iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
			i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
		}
	}

	/* close the CM node down if it is still active */
	if (iwqp->cm_node) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
		i40iw_cm_close(iwqp->cm_node);
	}
}

/**
 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
 * @iwqp: associate qp for the connection
 */
static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
{
	struct iw_cm_id *cm_id;
	struct i40iw_device *iwdev;
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
	u16 last_ae;
	u8 original_hw_tcp_state;
	u8 original_ibqp_state;
	int disconn_status = 0;
	int issue_disconn = 0;
	int issue_close = 0;
	int issue_flush = 0;
	struct ib_event ibevent;
	unsigned long flags;
	int ret;

	if (!iwqp) {
		i40iw_pr_err("iwqp == NULL\n");
		return;
	}

	spin_lock_irqsave(&iwqp->lock, flags);
	cm_id = iwqp->cm_id;
	/* make sure we haven't already closed this connection */
	if (!cm_id) {
		spin_unlock_irqrestore(&iwqp->lock, flags);
		return;
	}

	iwdev = to_iwdev(iwqp->ibqp.device);

	original_hw_tcp_state = iwqp->hw_tcp_state;
	original_ibqp_state = iwqp->ibqp_state;
	last_ae = iwqp->last_aeq;
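	/*
	 * Decide, under iwqp->lock, which actions apply: issue_flush
	 * (flush the SQ/RQ), issue_disconn (send IW_CM_EVENT_DISCONNECT
	 * up) and issue_close (send IW_CM_EVENT_CLOSE and detach the
	 * cm_id), based on the terminate flags, the HW TCP state and
	 * the last asynchronous event.
	 */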
	if (qp->term_flags) {
		issue_disconn = 1;
		issue_close = 1;
		iwqp->cm_id = NULL;
		/*When term timer expires after cm_timer, don't want
		 *terminate-handler to issue cm_disconn which can re-free
		 *a QP even after its refcnt=0.
		 */
		i40iw_terminate_del_timer(qp);
		if (!iwqp->flush_issued) {
			iwqp->flush_issued = 1;
			issue_flush = 1;
		}
	} else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
		   ((original_ibqp_state == IB_QPS_RTS) &&
		    (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
		issue_disconn = 1;
		if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
			disconn_status = -ECONNRESET;
	}

	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
		issue_close = 1;
		iwqp->cm_id = NULL;
		if (!iwqp->flush_issued) {
			iwqp->flush_issued = 1;
			issue_flush = 1;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (issue_flush && !iwqp->destroyed) {
		/* Flush the queues */
		i40iw_flush_wqes(iwdev, iwqp);

		if (qp->term_flags && iwqp->ibqp.event_handler) {
			ibevent.device = iwqp->ibqp.device;
			ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
					IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
			ibevent.element.qp = &iwqp->ibqp;
			iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
		}
	}

	if (cm_id && cm_id->event_handler) {
		if (issue_disconn) {
			ret = i40iw_send_cm_event(NULL,
						  cm_id,
						  IW_CM_EVENT_DISCONNECT,
						  disconn_status);

			if (ret)
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "disconnect event failed %s: - cm_id = %p\n",
					    __func__, cm_id);
		}
		if (issue_close) {
			i40iw_qp_disconnect(iwqp);
			cm_id->provider_data = iwqp;
			ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
			if (ret)
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "close event failed %s: - cm_id = %p\n",
					    __func__, cm_id);
			cm_id->rem_ref(cm_id);
		}
	}
}

/**
 * i40iw_disconnect_worker - worker for connection close
 * @work: pointer to disconn structure
 */
static void i40iw_disconnect_worker(struct work_struct *work)
{
	struct disconn_work *dwork = container_of(work, struct disconn_work, work);
	struct i40iw_qp *iwqp = dwork->iwqp;

	kfree(dwork);
	i40iw_cm_disconn_true(iwqp);
	i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_accept - registered call for connection to be accepted
 * @cm_id: cm information for passive connection
 * @conn_param: accept parameters
 */
int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct ib_qp *ibqp;
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	struct i40iw_cm_node *cm_node;
	struct ib_qp_attr attr;
	int passive_state;
	struct ib_mr *ibmr;
	struct i40iw_pd *iwpd;
	u16 buf_len = 0;
	struct i40iw_kmem_info accept;
	enum i40iw_status_code status;
	u64 tagged_offset;

	memset(&attr, 0, sizeof(attr));
	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	dev = &iwdev->sc_dev;
	cm_node = (struct i40iw_cm_node *)cm_id->provider_data;

	if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
		cm_node->ipv4 = true;
		cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
	} else {
		cm_node->ipv4 = false;
		i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
	}
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "Accept vlan_id=%d\n",
		    cm_node->vlan_id);

	if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
		if (cm_node->loopbackpartner)
			i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
		i40iw_rem_ref_cm_node(cm_node);
		return -EINVAL;
	}
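	/*
	 * If the reset path already bumped passive_state, the peer
	 * reset the connection while this accept was in flight.
	 */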
	passive_state = atomic_add_return(1, &cm_node->passive_state);
	if (passive_state == I40IW_SEND_RESET_EVENT) {
		i40iw_rem_ref_cm_node(cm_node);
		return -ECONNRESET;
	}

	cm_node->cm_core->stats_accepts++;
	iwqp->cm_node = (void *)cm_node;
	cm_node->iwqp = iwqp;

	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;

	status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);

	if (status)
		return -ENOMEM;
	cm_node->pdata.size = conn_param->private_data_len;
	accept.addr = iwqp->ietf_mem.va;
	accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
	memcpy(accept.addr + accept.size, conn_param->private_data,
	       conn_param->private_data_len);

	/* setup our first outgoing iWarp send WQE (the IETF frame response) */
	if ((cm_node->ipv4 &&
	     !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 &&
	     !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
		iwpd = iwqp->iwpd;
		tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
		ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
					 iwqp->ietf_mem.pa,
					 buf_len,
					 IB_ACCESS_LOCAL_WRITE,
					 &tagged_offset);
		if (IS_ERR(ibmr)) {
			i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
			return -ENOMEM;
		}

		ibmr->pd = &iwpd->ibpd;
		ibmr->device = iwpd->ibpd.device;
		iwqp->lsmm_mr = ibmr;
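		/*
		 * iwqp->page, when set (e.g. for user-mode QPs), must be
		 * mapped so the driver can write the LSMM WQE into the SQ.
		 */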
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
						  iwqp->ietf_mem.va,
						  (accept.size + conn_param->private_data_len),
						  ibmr->lkey);
	} else {
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
	}

	if (iwqp->page)
		kunmap(iwqp->page);
	iwqp->cm_id = cm_id;
	cm_node->cm_id = cm_id;

	cm_id->provider_data = (void *)iwqp;
	iwqp->active_conn = 0;

	cm_node->lsmm_size = accept.size + conn_param->private_data_len;
	i40iw_cm_init_tsa_conn(iwqp, cm_node);
	cm_id->add_ref(cm_id);
	i40iw_add_ref(&iwqp->ibqp);

	i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);

	attr.qp_state = IB_QPS_RTS;
	cm_node->qhash_set = false;
	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
	if (cm_node->loopbackpartner) {
		cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
		/* copy entire MPA frame to our cm_node's frame */
		memcpy(cm_node->loopbackpartner->pdata_buf,
		       conn_param->private_data,
		       conn_param->private_data_len);
		i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
	}

	cm_node->accelerated = 1;
	if (cm_node->accept_pend) {
		if (!cm_node->listener)
			i40iw_pr_err("cm_node->listener NULL for passive node\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		cm_node->accept_pend = 0;
	}
	return 0;
}

/**
 * i40iw_reject - registered call for connection to be rejected
 * @cm_id: cm information for passive connection
 * @pdata: private data to be sent
 * @pdata_len: private data length
 */
int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct i40iw_device *iwdev;
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_node *loopback;

	cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
	loopback = cm_node->loopbackpartner;
	cm_node->cm_id = cm_id;
	cm_node->pdata.size = pdata_len;

	iwdev = to_iwdev(cm_id->device);
	if (!iwdev)
		return -EINVAL;
	cm_node->cm_core->stats_rejects++;

	if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
		return -EINVAL;

	if (loopback) {
		memcpy(&loopback->pdata_buf, pdata, pdata_len);
		loopback->pdata.size = pdata_len;
	}

	return i40iw_cm_reject(cm_node, pdata, pdata_len);
}

/**
 * i40iw_connect - registered call for connection to be established
 * @cm_id: cm information for the active connection
 * @conn_param: Information about the connection
 */
int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct ib_qp *ibqp;
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_info cm_info;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	bool qhash_set = false;
	int apbvt_set = 0;
	int err = 0;
	enum i40iw_status_code status;

	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;
	iwqp = to_iwqp(ibqp);
	if (!iwqp)
		return -EINVAL;
	iwdev = to_iwdev(iwqp->ibqp.device);
	if (!iwdev)
		return -EINVAL;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!(laddr->sin_port) || !(raddr->sin_port))
		return -EINVAL;

	iwqp->active_conn = 1;
	iwqp->cm_id = NULL;
	cm_id->provider_data = iwqp;

	/* set up the connection params for the node */
	if (cm_id->remote_addr.ss_family == AF_INET) {
		cm_info.ipv4 = true;
		memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
		memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info.loc_port = ntohs(laddr->sin_port);
		cm_info.rem_port = ntohs(raddr->sin_port);
		cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
	} else {
		cm_info.ipv4 = false;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    laddr6->sin6_addr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(cm_info.rem_addr,
				    raddr6->sin6_addr.in6_u.u6_addr32);
		cm_info.loc_port = ntohs(laddr6->sin6_port);
		cm_info.rem_port = ntohs(raddr6->sin6_port);
		i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
	}
	cm_info.cm_id = cm_id;
	cm_info.tos = cm_id->tos;
	cm_info.user_pri = rt_tos2priority(cm_id->tos);
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
		    __func__, cm_id->tos, cm_info.user_pri);
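	/*
	 * Program the quad-hash (established-connection filter) only
	 * for non-loopback addresses; loopback traffic never reaches
	 * the HW filters. The APBVT entry reserves the local port
	 * either way.
	 */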
	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
	    (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
				     raddr6->sin6_addr.in6_u.u6_addr32,
				     sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
		status = i40iw_manage_qhash(iwdev,
					    &cm_info,
					    I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					    I40IW_QHASH_MANAGE_TYPE_ADD,
					    NULL,
					    true);
		if (status)
			return -EINVAL;
		qhash_set = true;
	}
	status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
	if (status) {
		i40iw_manage_qhash(iwdev,
				   &cm_info,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);
		return -EINVAL;
	}

	apbvt_set = 1;
	cm_id->add_ref(cm_id);
	cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
				       conn_param->private_data_len,
				       (void *)conn_param->private_data,
				       &cm_info);

	if (IS_ERR(cm_node)) {
		err = PTR_ERR(cm_node);
		goto err_out;
	}

	i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
	if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
	    !cm_node->ord_size)
		cm_node->ord_size = 1;

	cm_node->apbvt_set = apbvt_set;
	cm_node->qhash_set = qhash_set;
	iwqp->cm_node = cm_node;
	cm_node->iwqp = iwqp;
	iwqp->cm_id = cm_id;
	i40iw_add_ref(&iwqp->ibqp);

	if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
		cm_node->state = I40IW_CM_STATE_SYN_SENT;
		err = i40iw_send_syn(cm_node, 0);
		if (err) {
			i40iw_rem_ref_cm_node(cm_node);
			goto err_out;
		}
	}

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
		    cm_node->rem_port,
		    cm_node,
		    cm_node->cm_id);
	return 0;

err_out:
	if (cm_info.ipv4)
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "Api - connect() FAILED: dest addr=%pI4",
			    cm_info.rem_addr);
	else
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "Api - connect() FAILED: dest addr=%pI6",
			    cm_info.rem_addr);

	if (qhash_set)
		i40iw_manage_qhash(iwdev,
				   &cm_info,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);

	if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
						   cm_info.loc_port))
		i40iw_manage_apbvt(iwdev,
				   cm_info.loc_port,
				   I40IW_MANAGE_APBVT_DEL);
	cm_id->rem_ref(cm_id);
	iwdev->cm_core.stats_connect_errs++;
	return err;
}

/**
 * i40iw_create_listen - registered call creating listener
 * @cm_id: cm information for passive connection
 * @backlog: maximum number of pending accepts
 */
int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct i40iw_device *iwdev;
	struct i40iw_cm_listener *cm_listen_node;
	struct i40iw_cm_info cm_info;
	enum i40iw_status_code ret;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	bool wildcard = false;

	iwdev = to_iwdev(cm_id->device);
	if (!iwdev)
		return -EINVAL;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	memset(&cm_info, 0, sizeof(cm_info));
	if (laddr->sin_family == AF_INET) {
		cm_info.ipv4 = true;
		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info.loc_port = ntohs(laddr->sin_port);

		if (laddr->sin_addr.s_addr != INADDR_ANY)
			cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
		else
			wildcard = true;
	} else {
		cm_info.ipv4 = false;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    laddr6->sin6_addr.in6_u.u6_addr32);
		cm_info.loc_port = ntohs(laddr6->sin6_port);
		if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
			i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
					       &cm_info.vlan_id,
					       NULL);
		else
			wildcard = true;
	}
	cm_info.backlog = backlog;
	cm_info.cm_id = cm_id;

	cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
	if (!cm_listen_node) {
		i40iw_pr_err("cm_listen_node == NULL\n");
		return -ENOMEM;
	}

	cm_id->provider_data = cm_listen_node;

	cm_listen_node->tos = cm_id->tos;
	cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
	cm_info.user_pri = cm_listen_node->user_pri;
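	/*
	 * A reused node already owns its qhash/APBVT resources.
	 * Otherwise, a wildcard listen fans out per-interface child
	 * listeners (MQH) while a specific address gets a single SYN
	 * quad-hash entry; both then reserve the port in the APBVT
	 * table.
	 */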
	if (!cm_listen_node->reused_node) {
		if (wildcard) {
			if (cm_info.ipv4)
				ret = i40iw_add_mqh_4(iwdev,
						      &cm_info,
						      cm_listen_node);
			else
				ret = i40iw_add_mqh_6(iwdev,
						      &cm_info,
						      cm_listen_node);
			if (ret)
				goto error;

			ret = i40iw_manage_apbvt(iwdev,
						 cm_info.loc_port,
						 I40IW_MANAGE_APBVT_ADD);

			if (ret)
				goto error;
		} else {
			ret = i40iw_manage_qhash(iwdev,
						 &cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_ADD,
						 NULL,
						 true);
			if (ret)
				goto error;
			cm_listen_node->qhash_set = true;
			ret = i40iw_manage_apbvt(iwdev,
						 cm_info.loc_port,
						 I40IW_MANAGE_APBVT_ADD);
			if (ret)
				goto error;
		}
	}
	cm_id->add_ref(cm_id);
	cm_listen_node->cm_core->stats_listen_created++;
	return 0;

error:
	i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
	return -EINVAL;
}

/**
 * i40iw_destroy_listen - registered call to destroy listener
 * @cm_id: cm information for passive connection
 */
int i40iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct i40iw_device *iwdev;

	iwdev = to_iwdev(cm_id->device);
	if (cm_id->provider_data)
		i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
	else
		i40iw_pr_err("cm_id->provider_data was NULL\n");

	cm_id->rem_ref(cm_id);

	return 0;
}

/**
 * i40iw_cm_event_connected - handle connected active node
 * @event: the info for cm_node of connection
 */
static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_cm_node *cm_node;
	struct i40iw_sc_dev *dev;
	struct ib_qp_attr attr;
	struct iw_cm_id *cm_id;
	int status;
	bool read0;

	cm_node = event->cm_node;
	cm_id = cm_node->cm_id;
	iwqp = (struct i40iw_qp *)cm_id->provider_data;
	iwdev = to_iwdev(iwqp->ibqp.device);
	dev = &iwdev->sc_dev;

	if (iwqp->destroyed) {
		status = -ETIMEDOUT;
		goto error;
	}
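	/*
	 * Offload the connection into the QP context and post the first
	 * "rtt" WQE (a zero-length RDMA read when SEND_RDMA_READ_ZERO
	 * is requested) to start the iWARP streaming phase.
	 */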
	i40iw_cm_init_tsa_conn(iwqp, cm_node);
	read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
	if (iwqp->page)
		iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
	dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
	if (iwqp->page)
		kunmap(iwqp->page);

	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
	if (status)
		i40iw_pr_err("send cm event\n");

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	cm_node->qhash_set = false;
	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

	cm_node->accelerated = 1;
	if (cm_node->accept_pend) {
		if (!cm_node->listener)
			i40iw_pr_err("listener is null for passive node\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		cm_node->accept_pend = 0;
	}
	return;

error:
	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node,
			    cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    status);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}

/**
 * i40iw_cm_event_reset - handle reset
 * @event: the info for cm_node of connection
 */
static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
{
	struct i40iw_cm_node *cm_node = event->cm_node;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_qp *iwqp;

	if (!cm_id)
		return;

	iwqp = cm_id->provider_data;
	if (!iwqp)
		return;

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "reset event %p - cm_id = %p\n",
		    event->cm_node, cm_id);
	iwqp->cm_id = NULL;

	i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
	i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
}

/**
 * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
 * @work: pointer of cm event info.
 */
static void i40iw_cm_event_handler(struct work_struct *work)
{
	struct i40iw_cm_event *event = container_of(work,
						    struct i40iw_cm_event,
						    event_work);
	struct i40iw_cm_node *cm_node;

	if (!event || !event->cm_node || !event->cm_node->cm_core)
		return;

	cm_node = event->cm_node;
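	/*
	 * Translate internal CM events into iw_cm events. Stale events
	 * (no cm_id, or a node state that no longer matches the event)
	 * are dropped; the references taken in i40iw_cm_post_event()
	 * are released below.
	 */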
	switch (event->type) {
	case I40IW_CM_EVENT_MPA_REQ:
		i40iw_send_cm_event(cm_node,
				    cm_node->cm_id,
				    IW_CM_EVENT_CONNECT_REQUEST,
				    0);
		break;
	case I40IW_CM_EVENT_RESET:
		i40iw_cm_event_reset(event);
		break;
	case I40IW_CM_EVENT_CONNECTED:
		if (!event->cm_node->cm_id ||
		    (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
			break;
		i40iw_cm_event_connected(event);
		break;
	case I40IW_CM_EVENT_MPA_REJECT:
		if (!event->cm_node->cm_id ||
		    (cm_node->state == I40IW_CM_STATE_OFFLOADED))
			break;
		i40iw_send_cm_event(cm_node,
				    cm_node->cm_id,
				    IW_CM_EVENT_CONNECT_REPLY,
				    -ECONNREFUSED);
		break;
	case I40IW_CM_EVENT_ABORTED:
		if (!event->cm_node->cm_id ||
		    (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
			break;
		i40iw_event_connect_error(event);
		break;
	default:
		i40iw_pr_err("event type = %d\n", event->type);
		break;
	}

	event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
	kfree(event);
}

/**
 * i40iw_cm_post_event - queue event request for worker thread
 * @event: cm node's info for up event call
 */
static void i40iw_cm_post_event(struct i40iw_cm_event *event)
{
	atomic_inc(&event->cm_node->ref_count);
	event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
	INIT_WORK(&event->event_work, i40iw_cm_event_handler);

	queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}

/**
 * i40iw_qhash_ctrl - enable/disable qhash for list
 * @iwdev: device pointer
 * @parent_listen_node: parent listen node
 * @nfo: cm info node
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 *
 * Enables or disables the qhash for the node in the child
 * listen list that matches ipaddr. If no matching IP was found
 * it will allocate and add a new child listen node to the
 * parent listen node. The listen_list_lock is assumed to be
 * held when called.
 */
static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
			     struct i40iw_cm_listener *parent_listen_node,
			     struct i40iw_cm_info *nfo,
			     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
	struct i40iw_cm_listener *child_listen_node;
	struct list_head *pos, *tpos;
	enum i40iw_status_code ret;
	bool node_allocated = false;
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	list_for_each_safe(pos, tpos, child_listen_list) {
		child_listen_node = list_entry(pos,
					       struct i40iw_cm_listener,
					       child_listen_list);
		if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
			goto set_qhash;
	}

	/* if not found then add a child listener if interface is going up */
	if (!ifup)
		return;
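	/* atomic allocation: the caller holds listen_list_lock */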
	child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
	if (!child_listen_node)
		return;
	node_allocated = true;
	memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
	memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);

set_qhash:
	memcpy(nfo->loc_addr,
	       child_listen_node->loc_addr,
	       sizeof(nfo->loc_addr));
	nfo->vlan_id = child_listen_node->vlan_id;
	ret = i40iw_manage_qhash(iwdev, nfo,
				 I40IW_QHASH_TYPE_TCP_SYN,
				 op,
				 NULL, false);
	if (!ret) {
		child_listen_node->qhash_set = ifup;
		if (node_allocated)
			list_add(&child_listen_node->child_listen_list,
				 &parent_listen_node->child_listen_list);
	} else if (node_allocated) {
		kfree(child_listen_node);
	}
}

/**
 * i40iw_cm_disconnect_all - disconnect all connected qp's
 * @iwdev: device pointer
 */
void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct list_head *list_core_temp;
	struct list_head *list_node;
	struct i40iw_cm_node *cm_node;
	unsigned long flags;
	struct list_head connected_list;
	struct ib_qp_attr attr;
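	/*
	 * Two passes: collect node references onto a private list under
	 * ht_lock, then move each QP to error outside the lock, since
	 * i40iw_modify_qp() may sleep.
	 */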
	INIT_LIST_HEAD(&connected_list);
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		atomic_inc(&cm_node->ref_count);
		list_add(&cm_node->connected_entry, &connected_list);
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &connected_list) {
		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
		attr.qp_state = IB_QPS_ERR;
		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
		i40iw_rem_ref_cm_node(cm_node);
	}
}

/**
 * i40iw_if_notify - process an ifdown or ifup on an interface
 * @iwdev: device pointer
 * @netdev: network interface device structure
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 */
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
		     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	struct i40iw_cm_info nfo;
	u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
	enum i40iw_status_code ret;
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	/* Disable or enable qhash for listeners */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		if (vlan_id == listen_node->vlan_id &&
		    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
		     !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
			memcpy(nfo.loc_addr, listen_node->loc_addr,
			       sizeof(nfo.loc_addr));
			nfo.loc_port = listen_node->loc_port;
			nfo.ipv4 = listen_node->ipv4;
			nfo.vlan_id = listen_node->vlan_id;
			nfo.user_pri = listen_node->user_pri;
			if (!list_empty(&listen_node->child_listen_list)) {
				i40iw_qhash_ctrl(iwdev,
						 listen_node,
						 &nfo,
						 ipaddr, ipv4, ifup);
			} else if (memcmp(listen_node->loc_addr, ip_zero,
					  ipv4 ? 4 : 16)) {
				ret = i40iw_manage_qhash(iwdev,
							 &nfo,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 op,
							 NULL,
							 false);
				if (!ret)
					listen_node->qhash_set = ifup;
			}
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

	/* disconnect any connected qp's on ifdown */
	if (!ifup)
		i40iw_cm_disconnect_all(iwdev);
}