/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
*   copyright notice, this list of conditions and the following
*   disclaimer.
*
* - Redistributions in binary form must reproduce the above
*   copyright notice, this list of conditions and the following
*   disclaimer in the documentation and/or other materials
*   provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/atomic.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/notifier.h>
#include <linux/net.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/highmem.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip_fib.h>
#include <net/tcp.h>
#include <asm/checksum.h>

#include "i40iw.h"

static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
static void i40iw_cm_post_event(struct i40iw_cm_event *event);
static void i40iw_disconnect_worker(struct work_struct *work);

/**
 * i40iw_free_sqbuf - put back puda buffer if refcount is 0
 * @vsi: pointer to vsi structure
 * @bufp: puda buffer to free
 */
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
	struct i40iw_puda_rsrc *ilq = vsi->ilq;

	if (!atomic_dec_return(&buf->refcount))
		i40iw_puda_ret_bufpool(ilq, buf);
}

/**
 * i40iw_derive_hw_ird_setting - Calculate IRD
 * @cm_ird: IRD of connection's node
 *
 * The ird from the connection is rounded to a supported HW
 * setting (2, 8, 32, 64) and then encoded for the ird_size field
 * of qp_ctx
 */
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
	u8 encoded_ird_size;

	/* ird_size field is encoded in qp_ctx */
	switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
	case I40IW_HW_IRD_SETTING_64:
		encoded_ird_size = 3;
		break;
	case I40IW_HW_IRD_SETTING_32:
	case I40IW_HW_IRD_SETTING_16:
		encoded_ird_size = 2;
		break;
	case I40IW_HW_IRD_SETTING_8:
	case I40IW_HW_IRD_SETTING_4:
		encoded_ird_size = 1;
		break;
	case I40IW_HW_IRD_SETTING_2:
	default:
		encoded_ird_size = 0;
		break;
	}
	return encoded_ird_size;
}
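
/*
 * Illustrative sketch (not part of the original driver): how a requested
 * IRD value flows through the helper above. For example, a requested IRD
 * of 20 is rounded up to 32 by roundup_pow_of_two() and encoded as 2 for
 * the qp_ctx ird_size field; 0 skips the rounding and encodes as 0. The
 * wrapper name below is hypothetical.
 */
static inline u8 i40iw_example_encode_ird(u16 requested_ird)
{
	/* clamp to the driver maximum before encoding for qp_ctx */
	if (requested_ird > I40IW_MAX_IRD_SIZE)
		requested_ird = I40IW_MAX_IRD_SIZE;
	return i40iw_derive_hw_ird_setting(requested_ird);
}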

/**
 * i40iw_record_ird_ord - Record IRD/ORD passed in
 * @cm_node: connection's node
 * @conn_ird: connection IRD
 * @conn_ord: connection ORD
 */
static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
				 u32 conn_ord)
{
	if (conn_ird > I40IW_MAX_IRD_SIZE)
		conn_ird = I40IW_MAX_IRD_SIZE;

	if (conn_ord > I40IW_MAX_ORD_SIZE)
		conn_ord = I40IW_MAX_ORD_SIZE;
	else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
		conn_ord = 1;

	cm_node->ird_size = conn_ird;
	cm_node->ord_size = conn_ord;
}

/**
 * i40iw_copy_ip_ntohl - change network to host ip
 * @dst: IP address in host order
 * @src: IP address in network order (big endian)
 */
void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
{
	*dst++ = ntohl(*src++);
	*dst++ = ntohl(*src++);
	*dst++ = ntohl(*src++);
	*dst = ntohl(*src);
}

/**
 * i40iw_copy_ip_htonl - change host addr to network ip
 * @dst: IP address in network order (big endian)
 * @src: IP address in host order
 */
static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
{
	*dst++ = htonl(*src++);
	*dst++ = htonl(*src++);
	*dst++ = htonl(*src++);
	*dst = htonl(*src);
}

/**
 * i40iw_fill_sockaddr4 - get addr info for passive connection
 * @cm_node: connection's node
 * @event: upper layer's cm event
 */
static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
					struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;
	laddr->sin_port = htons(cm_node->loc_port);
	raddr->sin_port = htons(cm_node->rem_port);
	laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
	raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
}

/**
 * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
 * @cm_node: connection's node
 * @event: upper layer's cm event
 */
static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
					struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;
	laddr6->sin6_port = htons(cm_node->loc_port);
	raddr6->sin6_port = htons(cm_node->rem_port);
	i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
			    cm_node->loc_addr);
	i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
			    cm_node->rem_addr);
}

/**
 * i40iw_get_addr_info - copy ip/tcp info from a cm_node
 * @cm_node: contains ip/tcp info
 * @cm_info: to get a copy of the cm_node ip/tcp info
 */
static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
				struct i40iw_cm_info *cm_info)
{
	cm_info->ipv4 = cm_node->ipv4;
	cm_info->vlan_id = cm_node->vlan_id;
	memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
	memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
	cm_info->loc_port = cm_node->loc_port;
	cm_info->rem_port = cm_node->rem_port;
	cm_info->user_pri = cm_node->user_pri;
}

/**
 * i40iw_get_cmevent_info - for cm event upcall
 * @cm_node: connection's node
 * @cm_id: upper layer's cm struct for the event
 * @event: upper layer's cm event
 */
static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
					  struct iw_cm_id *cm_id,
					  struct iw_cm_event *event)
{
	memcpy(&event->local_addr, &cm_id->m_local_addr,
	       sizeof(event->local_addr));
	memcpy(&event->remote_addr, &cm_id->m_remote_addr,
	       sizeof(event->remote_addr));
	if (cm_node) {
		event->private_data = (void *)cm_node->pdata_buf;
		event->private_data_len = (u8)cm_node->pdata.size;
		event->ird = cm_node->ird_size;
		event->ord = cm_node->ord_size;
	}
}

/**
 * i40iw_send_cm_event - upcall cm's event handler
 * @cm_node: connection's node
 * @cm_id: upper layer's cm info struct
 * @type: event type to indicate
 * @status: status for the event type
 */
static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
			       struct iw_cm_id *cm_id,
			       enum iw_cm_event_type type,
			       int status)
{
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = type;
	event.status = status;
	switch (type) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		if (cm_node->ipv4)
			i40iw_fill_sockaddr4(cm_node, &event);
		else
			i40iw_fill_sockaddr6(cm_node, &event);
		event.provider_data = (void *)cm_node;
		event.private_data = (void *)cm_node->pdata_buf;
		event.private_data_len = (u8)cm_node->pdata.size;
		event.ird = cm_node->ird_size;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		i40iw_get_cmevent_info(cm_node, cm_id, &event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.ird = cm_node->ird_size;
		event.ord = cm_node->ord_size;
		break;
	case IW_CM_EVENT_DISCONNECT:
		break;
	case IW_CM_EVENT_CLOSE:
		break;
	default:
		i40iw_pr_err("event type received type = %d\n", type);
		return -1;
	}
	return cm_id->event_handler(cm_id, &event);
}

/**
 * i40iw_create_event - create cm event
 * @cm_node: connection's node
 * @type: event type to generate
 */
static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
						 enum i40iw_cm_event_type type)
{
	struct i40iw_cm_event *event;

	if (!cm_node->cm_id)
		return NULL;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return NULL;

	event->type = type;
	event->cm_node = cm_node;
	memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
	memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
	event->cm_info.rem_port = cm_node->rem_port;
	event->cm_info.loc_port = cm_node->loc_port;
	event->cm_info.cm_id = cm_node->cm_id;

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
		    cm_node,
		    event,
		    type,
		    event->cm_info.loc_addr,
		    event->cm_info.rem_addr);

	i40iw_cm_post_event(event);
	return event;
}

/**
 * i40iw_free_retrans_entry - free send entry
 * @cm_node: connection's node
 */
static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
{
	struct i40iw_device *iwdev = cm_node->iwdev;
	struct i40iw_timer_entry *send_entry;

	send_entry = cm_node->send_entry;
	if (send_entry) {
		cm_node->send_entry = NULL;
		i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
		kfree(send_entry);
		atomic_dec(&cm_node->ref_count);
	}
}

/**
 * i40iw_cleanup_retrans_entry - free send entry with lock
 * @cm_node: connection's node
 */
static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	i40iw_free_retrans_entry(cm_node);
	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}

/**
 * i40iw_form_cm_frame - get a free packet and build frame
 * @cm_node: connection's node info to use in frame
 * @options: pointer to options info
 * @hdr: pointer to mpa header
 * @pdata: pointer to private data
 * @flags: indicates FIN or ACK
 */
static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
						  struct i40iw_kmem_info *options,
						  struct i40iw_kmem_info *hdr,
						  struct i40iw_kmem_info *pdata,
						  u8 flags)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	u8 *buf;
	struct tcphdr *tcph;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	u16 packetsize;
	u16 eth_hlen = ETH_HLEN;
	u32 opts_len = 0;
	u32 pd_len = 0;
	u32 hdr_len = 0;
	u16 vtag;

	sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
	if (!sqbuf)
		return NULL;
	buf = sqbuf->mem.va;

	if (options)
		opts_len = (u32)options->size;

	if (hdr)
		hdr_len = hdr->size;

	if (pdata)
		pd_len = pdata->size;

	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
		eth_hlen += 4;

	if (cm_node->ipv4)
		packetsize = sizeof(*iph) + sizeof(*tcph);
	else
		packetsize = sizeof(*ip6h) + sizeof(*tcph);
	packetsize += opts_len + hdr_len + pd_len;

	memset(buf, 0x00, eth_hlen + packetsize);

	sqbuf->totallen = packetsize + eth_hlen;
	sqbuf->maclen = eth_hlen;
	sqbuf->tcphlen = sizeof(*tcph) + opts_len;
	sqbuf->scratch = (void *)cm_node;

	ethh = (struct ethhdr *)buf;
	buf += eth_hlen;

	if (cm_node->ipv4) {
		sqbuf->ipv4 = true;

		iph = (struct iphdr *)buf;
		buf += sizeof(*iph);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
		} else {
			ethh->h_proto = htons(ETH_P_IP);
		}

		iph->version = IPVERSION;
		iph->ihl = 5;	/* 5 * 4-byte words, IP header len */
		iph->tos = cm_node->tos;
		iph->tot_len = htons(packetsize);
		iph->id = htons(++cm_node->tcp_cntxt.loc_id);

		iph->frag_off = htons(0x4000);
		iph->ttl = 0x40;
		iph->protocol = IPPROTO_TCP;
		iph->saddr = htonl(cm_node->loc_addr[0]);
		iph->daddr = htonl(cm_node->rem_addr[0]);
	} else {
		sqbuf->ipv4 = false;
		ip6h = (struct ipv6hdr *)buf;
		buf += sizeof(*ip6h);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
		} else {
			ethh->h_proto = htons(ETH_P_IPV6);
		}
		ip6h->version = 6;
		ip6h->priority = cm_node->tos >> 4;
		ip6h->flow_lbl[0] = cm_node->tos << 4;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;
		ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
		ip6h->nexthdr = 6;
		ip6h->hop_limit = 128;
		i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
				    cm_node->loc_addr);
		i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
				    cm_node->rem_addr);
	}

	tcph->source = htons(cm_node->loc_port);
	tcph->dest = htons(cm_node->rem_port);

	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);

	if (flags & SET_ACK) {
		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
		tcph->ack = 1;
	} else {
		tcph->ack_seq = 0;
	}

	if (flags & SET_SYN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->syn = 1;
	} else {
		cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
	}

	if (flags & SET_FIN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->fin = 1;
	}

	if (flags & SET_RST)
		tcph->rst = 1;

	tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
	sqbuf->tcphlen = tcph->doff << 2;
	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
	tcph->urg_ptr = 0;

	if (opts_len) {
		memcpy(buf, options->addr, opts_len);
		buf += opts_len;
	}

	if (hdr_len) {
		memcpy(buf, hdr->addr, hdr_len);
		buf += hdr_len;
	}

	if (pdata && pdata->addr)
		memcpy(buf, pdata->addr, pdata->size);

	atomic_set(&sqbuf->refcount, 1);

	return sqbuf;
}

/**
 * i40iw_send_reset - Send RST packet
 * @cm_node: connection's node
 */
int i40iw_send_reset(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	int flags = SET_RST | SET_ACK;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}

	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
}

/**
 * i40iw_active_open_err - send event for active side cm error
 * @cm_node: connection's node
 * @reset: flag to send reset or not
 */
static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_connect_errs++;
	if (reset) {
		i40iw_debug(cm_node->dev,
			    I40IW_DEBUG_CM,
			    "%s cm_node=%p state=%d\n",
			    __func__,
			    cm_node,
			    cm_node->state);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
	}

	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
}

/**
 * i40iw_passive_open_err - handle passive side cm error
 * @cm_node: connection's node
 * @reset: send reset or just free cm_node
 */
static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_passive_errs++;
	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "%s cm_node=%p state=%d\n",
		    __func__,
		    cm_node,
		    cm_node->state);
	if (reset)
		i40iw_send_reset(cm_node);
	else
		i40iw_rem_ref_cm_node(cm_node);
}

/**
 * i40iw_event_connect_error - to create connect error event
 * @event: cm information for connect event
 */
static void i40iw_event_connect_error(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct iw_cm_id *cm_id;

	cm_id = event->cm_node->cm_id;
	if (!cm_id)
		return;

	iwqp = cm_id->provider_data;

	if (!iwqp || !iwqp->iwdev)
		return;

	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node, cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    -ECONNRESET);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}

/**
 * i40iw_process_options - process options from a TCP header
 * @cm_node: connection's node
 * @optionsloc: pointer to start of options
 * @optionsize: size of all options
 * @syn_packet: flag if syn packet
 */
static int i40iw_process_options(struct i40iw_cm_node *cm_node,
				 u8 *optionsloc,
				 u32 optionsize,
				 u32 syn_packet)
{
	u32 tmp;
	u32 offset = 0;
	union all_known_options *all_options;
	char got_mss_option = 0;

	while (offset < optionsize) {
		all_options = (union all_known_options *)(optionsloc + offset);
		switch (all_options->as_base.optionnum) {
		case OPTION_NUMBER_END:
			offset = optionsize;
			break;
		case OPTION_NUMBER_NONE:
			offset += 1;
			continue;
		case OPTION_NUMBER_MSS:
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: MSS Length: %d Offset: %d Size: %d\n",
				    __func__,
				    all_options->as_mss.length,
				    offset,
				    optionsize);
			got_mss_option = 1;
			if (all_options->as_mss.length != 4)
				return -1;
			tmp = ntohs(all_options->as_mss.mss);
			if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
				cm_node->tcp_cntxt.mss = tmp;
			break;
		case OPTION_NUMBER_WINDOW_SCALE:
			cm_node->tcp_cntxt.snd_wscale =
			    all_options->as_windowscale.shiftcount;
			break;
		default:
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "TCP Option not understood: %x\n",
				    all_options->as_base.optionnum);
			break;
		}
		offset += all_options->as_base.length;
	}
	if (!got_mss_option && syn_packet)
		cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
	return 0;
}

/**
 * i40iw_handle_tcp_options - parse and apply TCP options
 * @cm_node: connection's node
 * @tcph: pointer to tcp header
 * @optionsize: size of options rcvd
 * @passive: active or passive flag
 */
static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
				    struct tcphdr *tcph,
				    int optionsize,
				    int passive)
{
	u8 *optionsloc = (u8 *)&tcph[1];

	if (optionsize) {
		if (i40iw_process_options(cm_node,
					  optionsloc,
					  optionsize,
					  (u32)tcph->syn)) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: Node %p, Sending RESET\n",
				    __func__,
				    cm_node);
			if (passive)
				i40iw_passive_open_err(cm_node, true);
			else
				i40iw_active_open_err(cm_node, true);
			return -1;
		}
	}

	cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
	    cm_node->tcp_cntxt.snd_wscale;

	if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
		cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
	return 0;
}
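
/*
 * Illustrative sketch (not part of the original driver): the effective
 * send window computed above is the advertised 16-bit TCP window
 * left-shifted by the peer's window-scale option. For example, a wire
 * window of 0x2000 with snd_wscale = 7 yields 0x100000 bytes. The helper
 * name below is hypothetical.
 */
static inline u32 i40iw_example_scaled_snd_wnd(u16 wire_wnd, u8 snd_wscale)
{
	/* RFC 7323 limits the shift count to 14 */
	return (u32)wire_wnd << min_t(u8, snd_wscale, 14);
}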

/**
 * i40iw_build_mpa_v1 - build a MPA V1 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: to do read0 or write0
 */
static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
		break;
	case MPA_KEY_REPLY:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
		break;
	default:
		break;
	}
	mpa_frame->flags = IETF_MPA_FLAGS_CRC;
	mpa_frame->rev = cm_node->mpa_frame_rev;
	mpa_frame->priv_data_len = htons(cm_node->pdata.size);
}

/**
 * i40iw_build_mpa_v2 - build a MPA V2 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: to do read0 or write0
 */
static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
	u16 ctrl_ird, ctrl_ord;

	/* initialize the upper 5 bytes of the frame */
	i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
	mpa_frame->flags |= IETF_MPA_V2_FLAG;
	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);

	/* initialize RTR msg */
	if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
		ctrl_ird = IETF_NO_IRD_ORD;
		ctrl_ord = IETF_NO_IRD_ORD;
	} else {
		ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ird_size;
		ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ord_size;
	}

	ctrl_ird |= IETF_PEER_TO_PEER;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		ctrl_ord |= IETF_RDMA0_WRITE;
		ctrl_ord |= IETF_RDMA0_READ;
		break;
	case MPA_KEY_REPLY:
		switch (cm_node->send_rdma0_op) {
		case SEND_RDMA_WRITE_ZERO:
			ctrl_ord |= IETF_RDMA0_WRITE;
			break;
		case SEND_RDMA_READ_ZERO:
			ctrl_ord |= IETF_RDMA0_READ;
			break;
		}
		break;
	default:
		break;
	}
	rtr_msg->ctrl_ird = htons(ctrl_ird);
	rtr_msg->ctrl_ord = htons(ctrl_ord);
}
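
/*
 * Illustrative sketch (not part of the original driver): how a receiver
 * would decode the RTR control words built above. The low bits carry the
 * IRD/ORD value (IETF_NO_IRD_ORD doubles as both the value mask and the
 * "no IRD/ORD" marker), while IETF_PEER_TO_PEER and the IETF_RDMA0_*
 * flags ride in the bits above it. The helper name is hypothetical.
 */
static inline u16 i40iw_example_rtr_ird(const struct ietf_rtr_msg *rtr_msg)
{
	/* mask off IETF_PEER_TO_PEER and the RDMA0 flag bits */
	return ntohs(rtr_msg->ctrl_ird) & IETF_NO_IRD_ORD;
}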

/**
 * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
 * @cm_node: connection's node
 * @mpa: mpa data buffer
 * @mpa_key: to do read0 or write0
 */
static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
				    struct i40iw_kmem_info *mpa,
				    u8 mpa_key)
{
	int hdr_len = 0;

	switch (cm_node->mpa_frame_rev) {
	case IETF_MPA_V1:
		hdr_len = sizeof(struct ietf_mpa_v1);
		i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
		break;
	case IETF_MPA_V2:
		hdr_len = sizeof(struct ietf_mpa_v2);
		i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
		break;
	default:
		break;
	}

	return hdr_len;
}

/**
 * i40iw_send_mpa_request - active node send mpa request to passive node
 * @cm_node: connection's node
 */
static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	if (!cm_node) {
		i40iw_pr_err("cm_node == NULL\n");
		return -1;
	}

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REQUEST);
	if (!cm_node->mpa_hdr.size) {
		i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
		return -1;
	}

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &cm_node->pdata,
				    SET_ACK);
	if (!sqbuf) {
		i40iw_pr_err("sq_buf == NULL\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_send_mpa_reject - send mpa reject frame
 * @cm_node: connection's node
 * @pdata: reject data for connection
 * @plen: length of reject data
 */
static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
				 const void *pdata,
				 u8 plen)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_kmem_info priv_info;

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REPLY);

	cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
	priv_info.addr = (void *)pdata;
	priv_info.size = plen;

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &priv_info,
				    SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -ENOMEM;
	}
	cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_parse_mpa - process an IETF MPA frame
 * @cm_node: connection's node
 * @buffer: data pointer
 * @type: to return accept or reject
 * @len: len of mpa buffer
 */
static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
{
	struct ietf_mpa_v1 *mpa_frame;
	struct ietf_mpa_v2 *mpa_v2_frame;
	struct ietf_rtr_msg *rtr_msg;
	int mpa_hdr_len;
	int priv_data_len;

	*type = I40IW_MPA_REQUEST_ACCEPT;

	if (len < sizeof(struct ietf_mpa_v1)) {
		i40iw_pr_err("ietf buffer small (%x)\n", len);
		return -1;
	}

	mpa_frame = (struct ietf_mpa_v1 *)buffer;
	mpa_hdr_len = sizeof(struct ietf_mpa_v1);
	priv_data_len = ntohs(mpa_frame->priv_data_len);

	if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
		i40iw_pr_err("large pri_data %d\n", priv_data_len);
		return -1;
	}

	if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
		i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
		return -1;
	}

	if (mpa_frame->rev > cm_node->mpa_frame_rev) {
		i40iw_pr_err("rev %d\n", mpa_frame->rev);
		return -1;
	}

	cm_node->mpa_frame_rev = mpa_frame->rev;

	if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
			i40iw_pr_err("Unexpected MPA Key received\n");
			return -1;
		}
	} else {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
			i40iw_pr_err("Unexpected MPA Key received\n");
			return -1;
		}
	}

	if (priv_data_len + mpa_hdr_len > len) {
		i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
			     priv_data_len, mpa_hdr_len, len);
		return -1;
	}

	if (len > MAX_CM_BUFFER) {
		i40iw_pr_err("ietf buffer large len = %d\n", len);
		return -1;
	}

	switch (mpa_frame->rev) {
	case IETF_MPA_V2:{
			u16 ird_size;
			u16 ord_size;
			u16 ctrl_ord;
			u16 ctrl_ird;

			mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
			mpa_hdr_len += IETF_RTR_MSG_SIZE;
			rtr_msg = &mpa_v2_frame->rtr_msg;

			/* parse rtr message */
			ctrl_ord = ntohs(rtr_msg->ctrl_ord);
			ctrl_ird = ntohs(rtr_msg->ctrl_ird);
			ird_size = ctrl_ird & IETF_NO_IRD_ORD;
			ord_size = ctrl_ord & IETF_NO_IRD_ORD;

			if (!(ctrl_ird & IETF_PEER_TO_PEER))
				return -1;

			if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
				cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
				goto negotiate_done;
			}

			if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
				/* responder */
				if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
					cm_node->ird_size = 1;
				if (cm_node->ord_size > ird_size)
					cm_node->ord_size = ird_size;
			} else {
				/* initiator */
				if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
					return -1;
				if (cm_node->ord_size > ird_size)
					cm_node->ord_size = ird_size;

				if (cm_node->ird_size < ord_size)
					/* no resources available */
					return -1;
			}

negotiate_done:
			if (ctrl_ord & IETF_RDMA0_READ)
				cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
			else if (ctrl_ord & IETF_RDMA0_WRITE)
				cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
			else	/* Not supported RDMA0 operation */
				return -1;
			i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
				    "MPAV2: Negotiated ORD: %d, IRD: %d\n",
				    cm_node->ord_size, cm_node->ird_size);
			break;
		}
		break;
	case IETF_MPA_V1:
	default:
		break;
	}

	memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
	cm_node->pdata.size = priv_data_len;
	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
		*type = I40IW_MPA_REQUEST_REJECT;

	if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
		cm_node->snd_mark_en = true;

	return 0;
}

/**
 * i40iw_schedule_cm_timer - schedule send/close timer for a cm_node
 * @cm_node: connection's node
 * @sqbuf: buffer to send
 * @type: if it is send or close
 * @send_retrans: if rexmits to be done
 * @close_when_complete: is cm_node to be removed
 *
 * note - cm_node needs to be protected before calling this. Encase in:
 *		atomic_inc(&cm_node->ref_count);
 *		i40iw_schedule_cm_timer(...);
 *		i40iw_rem_ref_cm_node(cm_node);
 */
int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
			    struct i40iw_puda_buf *sqbuf,
			    enum i40iw_timer_type type,
			    int send_retrans,
			    int close_when_complete)
{
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_timer_entry *new_send;
	int ret = 0;
	u32 was_timer_set;
	unsigned long flags;

	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
	if (!new_send) {
		if (type != I40IW_TIMER_TYPE_CLOSE)
			i40iw_free_sqbuf(vsi, (void *)sqbuf);
		return -ENOMEM;
	}
	new_send->retrycount = I40IW_DEFAULT_RETRYS;
	new_send->retranscount = I40IW_DEFAULT_RETRANS;
	new_send->sqbuf = sqbuf;
	new_send->timetosend = jiffies;
	new_send->type = type;
	new_send->send_retrans = send_retrans;
	new_send->close_when_complete = close_when_complete;

	if (type == I40IW_TIMER_TYPE_CLOSE) {
		new_send->timetosend += (HZ / 10);
		if (cm_node->close_entry) {
			kfree(new_send);
			i40iw_pr_err("already close entry\n");
			return -EINVAL;
		}
		cm_node->close_entry = new_send;
	}

	if (type == I40IW_TIMER_TYPE_SEND) {
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		cm_node->send_entry = new_send;
		atomic_inc(&cm_node->ref_count);
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;

		atomic_inc(&sqbuf->refcount);
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
		if (!send_retrans) {
			i40iw_cleanup_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
			return ret;
		}
	}

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	was_timer_set = timer_pending(&cm_core->tcp_timer);

	if (!was_timer_set) {
		cm_core->tcp_timer.expires = new_send->timetosend;
		add_timer(&cm_core->tcp_timer);
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	return ret;
}
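
/*
 * Usage sketch (illustrative only, not part of the original driver): how
 * a caller protects cm_node around the scheduling call per the note
 * above. The wrapper name is hypothetical; error paths are elided.
 */
static inline int i40iw_example_send_protected(struct i40iw_cm_node *cm_node,
					       struct i40iw_puda_buf *sqbuf)
{
	int ret;

	atomic_inc(&cm_node->ref_count);	/* keep node alive across the call */
	ret = i40iw_schedule_cm_timer(cm_node, sqbuf,
				      I40IW_TIMER_TYPE_SEND, 1, 0);
	i40iw_rem_ref_cm_node(cm_node);		/* drop our temporary reference */
	return ret;
}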
  991. /**
  992. * i40iw_retrans_expired - Could not rexmit the packet
  993. * @cm_node: connection's node
  994. */
  995. static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
  996. {
  997. struct iw_cm_id *cm_id = cm_node->cm_id;
  998. enum i40iw_cm_node_state state = cm_node->state;
  999. cm_node->state = I40IW_CM_STATE_CLOSED;
  1000. switch (state) {
  1001. case I40IW_CM_STATE_SYN_RCVD:
  1002. case I40IW_CM_STATE_CLOSING:
  1003. i40iw_rem_ref_cm_node(cm_node);
  1004. break;
  1005. case I40IW_CM_STATE_FIN_WAIT1:
  1006. case I40IW_CM_STATE_LAST_ACK:
  1007. if (cm_node->cm_id)
  1008. cm_id->rem_ref(cm_id);
  1009. i40iw_send_reset(cm_node);
  1010. break;
  1011. default:
  1012. atomic_inc(&cm_node->ref_count);
  1013. i40iw_send_reset(cm_node);
  1014. i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
  1015. break;
  1016. }
  1017. }
  1018. /**
  1019. * i40iw_handle_close_entry - for handling retry/timeouts
  1020. * @cm_node: connection's node
  1021. * @rem_node: flag for remove cm_node
  1022. */
  1023. static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
  1024. {
  1025. struct i40iw_timer_entry *close_entry = cm_node->close_entry;
  1026. struct iw_cm_id *cm_id = cm_node->cm_id;
  1027. struct i40iw_qp *iwqp;
  1028. unsigned long flags;
  1029. if (!close_entry)
  1030. return;
  1031. iwqp = (struct i40iw_qp *)close_entry->sqbuf;
  1032. if (iwqp) {
  1033. spin_lock_irqsave(&iwqp->lock, flags);
  1034. if (iwqp->cm_id) {
  1035. iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
  1036. iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
  1037. iwqp->last_aeq = I40IW_AE_RESET_SENT;
  1038. iwqp->ibqp_state = IB_QPS_ERR;
  1039. spin_unlock_irqrestore(&iwqp->lock, flags);
  1040. i40iw_cm_disconn(iwqp);
  1041. } else {
  1042. spin_unlock_irqrestore(&iwqp->lock, flags);
  1043. }
  1044. } else if (rem_node) {
  1045. /* TIME_WAIT state */
  1046. i40iw_rem_ref_cm_node(cm_node);
  1047. }
  1048. if (cm_id)
  1049. cm_id->rem_ref(cm_id);
  1050. kfree(close_entry);
  1051. cm_node->close_entry = NULL;
  1052. }
  1053. /**
  1054. * i40iw_build_timer_list - Add cm_nodes to timer list
  1055. * @timer_list: ptr to timer list
  1056. * @hte: ptr to accelerated or non-accelerated list
  1057. */
  1058. static void i40iw_build_timer_list(struct list_head *timer_list,
  1059. struct list_head *hte)
  1060. {
  1061. struct i40iw_cm_node *cm_node;
  1062. struct list_head *list_core_temp, *list_node;
  1063. list_for_each_safe(list_node, list_core_temp, hte) {
  1064. cm_node = container_of(list_node, struct i40iw_cm_node, list);
  1065. if (cm_node->close_entry || cm_node->send_entry) {
  1066. atomic_inc(&cm_node->ref_count);
  1067. list_add(&cm_node->timer_entry, timer_list);
  1068. }
  1069. }
  1070. }
  1071. /**
  1072. * i40iw_cm_timer_tick - system's timer expired callback
  1073. * @pass: Pointing to cm_core
  1074. */
static void i40iw_cm_timer_tick(struct timer_list *t)
{
	unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
	struct i40iw_cm_node *cm_node;
	struct i40iw_timer_entry *send_entry, *close_entry;
	struct list_head *list_core_temp;
	struct i40iw_sc_vsi *vsi;
	struct list_head *list_node;
	struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
	u32 settimer = 0;
	unsigned long timetosend;
	unsigned long flags;
	struct list_head timer_list;

	INIT_LIST_HEAD(&timer_list);

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	i40iw_build_timer_list(&timer_list, &cm_core->non_accelerated_list);
	i40iw_build_timer_list(&timer_list, &cm_core->accelerated_list);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &timer_list) {
		cm_node = container_of(list_node,
				       struct i40iw_cm_node,
				       timer_entry);
		close_entry = cm_node->close_entry;

		if (close_entry) {
			if (time_after(close_entry->timetosend, jiffies)) {
				if (nexttimeout > close_entry->timetosend ||
				    !settimer) {
					nexttimeout = close_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_handle_close_entry(cm_node, 1);
			}
		}

		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);

		send_entry = cm_node->send_entry;
		if (!send_entry)
			goto done;
		if (time_after(send_entry->timetosend, jiffies)) {
			if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
				if ((nexttimeout > send_entry->timetosend) ||
				    !settimer) {
					nexttimeout = send_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_free_retrans_entry(cm_node);
			}
			goto done;
		}

		if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
		    (cm_node->state == I40IW_CM_STATE_CLOSED)) {
			i40iw_free_retrans_entry(cm_node);
			goto done;
		}

		if (!send_entry->retranscount || !send_entry->retrycount) {
			i40iw_free_retrans_entry(cm_node);
			spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
			i40iw_retrans_expired(cm_node);
			cm_node->state = I40IW_CM_STATE_CLOSED;
			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);

		vsi = &cm_node->iwdev->vsi;

		if (!cm_node->ack_rcvd) {
			atomic_inc(&send_entry->sqbuf->refcount);
			i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
			cm_node->cm_core->stats_pkt_retrans++;
		}
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		if (send_entry->send_retrans) {
			send_entry->retranscount--;
			timetosend = (I40IW_RETRY_TIMEOUT <<
				      (I40IW_DEFAULT_RETRANS -
				       send_entry->retranscount));

			send_entry->timetosend = jiffies +
			    min(timetosend, I40IW_MAX_TIMEOUT);
			if (nexttimeout > send_entry->timetosend || !settimer) {
				nexttimeout = send_entry->timetosend;
				settimer = 1;
			}
		} else {
			int close_when_complete;

			close_when_complete = send_entry->close_when_complete;
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p state=%d\n",
				    cm_node,
				    cm_node->state);
			i40iw_free_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
		}
done:
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		i40iw_rem_ref_cm_node(cm_node);
	}

	if (settimer) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		if (!timer_pending(&cm_core->tcp_timer)) {
			cm_core->tcp_timer.expires = nexttimeout;
			add_timer(&cm_core->tcp_timer);
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}
}
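/*
 * Illustrative sketch (not part of the original driver): the retransmit
 * arm above implements binary exponential backoff.  Each retry decrements
 * retranscount, which increases the left shift applied to
 * I40IW_RETRY_TIMEOUT by one and so doubles the timeout, capped at
 * I40IW_MAX_TIMEOUT.  A standalone helper computing the same deadline
 * (the name is hypothetical) might look like this:
 */
static inline unsigned long i40iw_example_retrans_deadline(u32 retranscount)
{
	unsigned long timeout = I40IW_RETRY_TIMEOUT <<
				(I40IW_DEFAULT_RETRANS - retranscount);

	return jiffies + min(timeout, I40IW_MAX_TIMEOUT);
}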
/**
 * i40iw_send_syn - send SYN packet
 * @cm_node: connection's node
 * @sendack: flag to set ACK bit or not
 */
int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
{
	struct i40iw_puda_buf *sqbuf;
	int flags = SET_SYN;
	char optionsbuffer[sizeof(struct option_mss) +
			   sizeof(struct option_windowscale) +
			   sizeof(struct option_base) + TCP_OPTIONS_PADDING];
	struct i40iw_kmem_info opts;

	int optionssize = 0;
	/* Sending MSS option */
	union all_known_options *options;

	opts.addr = optionsbuffer;
	if (!cm_node) {
		i40iw_pr_err("no cm_node\n");
		return -EINVAL;
	}

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_mss.optionnum = OPTION_NUMBER_MSS;
	options->as_mss.length = sizeof(struct option_mss);
	options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
	optionssize += sizeof(struct option_mss);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
	options->as_windowscale.length = sizeof(struct option_windowscale);
	options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
	optionssize += sizeof(struct option_windowscale);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_end = OPTION_NUMBER_END;
	optionssize += 1;

	if (sendack)
		flags |= SET_ACK;

	opts.size = optionssize;

	sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}
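/*
 * Usage sketch (hypothetical caller, mirroring the MPA-v2 fallback path
 * later in this file): an active open moves the node to SYN_SENT and
 * emits the initial SYN; the timer entry scheduled by i40iw_send_syn()
 * then owns any retransmission of the frame.
 */
static inline int i40iw_example_active_open(struct i40iw_cm_node *cm_node)
{
	cm_node->state = I40IW_CM_STATE_SYN_SENT;
	return i40iw_send_syn(cm_node, 0);	/* plain SYN, no ACK bit */
}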
/**
 * i40iw_send_ack - Send ACK packet
 * @cm_node: connection's node
 */
static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
	if (sqbuf)
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
	else
		i40iw_pr_err("no sqbuf\n");
}
/**
 * i40iw_send_fin - Send FIN pkt
 * @cm_node: connection's node
 */
static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}
/**
 * i40iw_find_node - find a cm node that matches the reference cm node
 * @cm_core: cm's core
 * @rem_port: remote tcp port num
 * @rem_addr: remote ip addr
 * @loc_port: local tcp port num
 * @loc_addr: loc ip addr
 * @add_refcnt: flag to increment refcount of cm_node
 * @accelerated_list: flag for accelerated vs non-accelerated list to search
 */
struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
				      u16 rem_port,
				      u32 *rem_addr,
				      u16 loc_port,
				      u32 *loc_addr,
				      bool add_refcnt,
				      bool accelerated_list)
{
	struct list_head *hte;
	struct i40iw_cm_node *cm_node;
	unsigned long flags;

	hte = accelerated_list ?
	      &cm_core->accelerated_list : &cm_core->non_accelerated_list;

	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_entry(cm_node, hte, list) {
		if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
		    (cm_node->loc_port == loc_port) &&
		    !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
		    (cm_node->rem_port == rem_port)) {
			if (add_refcnt)
				atomic_inc(&cm_node->ref_count);
			spin_unlock_irqrestore(&cm_core->ht_lock, flags);
			return cm_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	/* no owner node */
	return NULL;
}
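/*
 * Lookup sketch (hypothetical helper, not from the driver): a caller that
 * does not know whether the connection has been accelerated yet searches
 * both lists, taking a reference on any match, in the same spirit as the
 * two-list scan in i40iw_port_in_use() below:
 */
static inline struct i40iw_cm_node *
i40iw_example_find_any(struct i40iw_cm_core *cm_core,
		       u16 rem_port, u32 *rem_addr,
		       u16 loc_port, u32 *loc_addr)
{
	struct i40iw_cm_node *cm_node;

	cm_node = i40iw_find_node(cm_core, rem_port, rem_addr,
				  loc_port, loc_addr, true, true);
	if (!cm_node)
		cm_node = i40iw_find_node(cm_core, rem_port, rem_addr,
					  loc_port, loc_addr, true, false);
	return cm_node;
}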
/**
 * i40iw_find_listener - find a cm node listening on this addr-port pair
 * @cm_core: cm's core
 * @dst_addr: listener ip addr
 * @dst_port: listener tcp port num
 * @vlan_id: virtual LAN ID of the listener
 * @listener_state: state to match with listen node's
 */
static struct i40iw_cm_listener *i40iw_find_listener(
						     struct i40iw_cm_core *cm_core,
						     u32 *dst_addr,
						     u16 dst_port,
						     u16 vlan_id,
						     enum i40iw_cm_listener_state
						     listener_state)
{
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	u32 listen_addr[4];
	u16 listen_port;
	unsigned long flags;

	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
		listen_port = listen_node->loc_port;
		/* compare node pair, return node handle if a match */
		if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
		     !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
		    (listen_port == dst_port) &&
		    (listener_state & listen_node->listener_state)) {
			atomic_inc(&listen_node->ref_count);
			spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
			return listen_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	return NULL;
}
/**
 * i40iw_add_hte_node - add a cm node to the hash table
 * @cm_core: cm's core
 * @cm_node: connection's node
 */
static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_node *cm_node)
{
	unsigned long flags;

	if (!cm_node || !cm_core) {
		i40iw_pr_err("cm_node or cm_core == NULL\n");
		return;
	}

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_add_tail(&cm_node->list, &cm_core->non_accelerated_list);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
}
/**
 * i40iw_find_port - find port that matches reference port
 * @cm_core: cm's core
 * @port: port number
 * @accelerated_list: flag for accelerated vs non-accelerated list
 */
static bool i40iw_find_port(struct i40iw_cm_core *cm_core, u16 port,
			    bool accelerated_list)
{
	struct list_head *hte;
	struct i40iw_cm_node *cm_node;

	hte = accelerated_list ?
	      &cm_core->accelerated_list : &cm_core->non_accelerated_list;

	list_for_each_entry(cm_node, hte, list) {
		if (cm_node->loc_port == port)
			return true;
	}

	return false;
}
/**
 * i40iw_port_in_use - determine if port is in use
 * @cm_core: cm's core
 * @port: port number
 * @active_side: flag for listener side vs active side
 */
static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
{
	struct i40iw_cm_listener *listen_node;
	unsigned long flags;
	bool ret = false;

	if (active_side) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		ret = i40iw_find_port(cm_core, port, true);
		if (!ret)
			ret = i40iw_find_port(cm_core, port, false);
		if (!ret)
			clear_bit(port, cm_core->active_side_ports);
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	} else {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
			if (listen_node->loc_port == port) {
				ret = true;
				break;
			}
		}
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}

	return ret;
}
/**
 * i40iw_del_multiple_qhash - Remove qhash and child listens
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 */
static enum i40iw_status_code i40iw_del_multiple_qhash(
						       struct i40iw_device *iwdev,
						       struct i40iw_cm_info *cm_info,
						       struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = I40IW_ERR_CONFIG;
	struct list_head *pos, *tpos;
	unsigned long flags;

	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
	list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
		child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
		if (child_listen_node->ipv4)
			i40iw_debug(&iwdev->sc_dev,
				    I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		else
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		list_del(pos);
		memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
		       sizeof(cm_info->loc_addr));
		cm_info->vlan_id = child_listen_node->vlan_id;
		if (child_listen_node->qhash_set) {
			ret = i40iw_manage_qhash(iwdev, cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_DELETE,
						 NULL, false);
			child_listen_node->qhash_set = false;
		} else {
			ret = I40IW_SUCCESS;
		}
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "freed pointer = %p\n",
			    child_listen_node);
		kfree(child_listen_node);
		cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
	}
	spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);

	return ret;
}
/**
 * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan
 * @addr: local IPv6 address
 * @vlan_id: vlan id for the given IPv6 address
 *
 * Returns the net_device of the IPv6 address and also sets the
 * vlan id for that address.
 */
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id)
{
	struct net_device *ip_dev = NULL;
	struct in6_addr laddr6;

	if (!IS_ENABLED(CONFIG_IPV6))
		return NULL;
	i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
	if (vlan_id)
		*vlan_id = I40IW_NO_VLAN;
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
			if (vlan_id)
				*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
			break;
		}
	}
	rcu_read_unlock();
	return ip_dev;
}
/**
 * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
 * @addr: local IPv4 address
 */
static u16 i40iw_get_vlan_ipv4(u32 *addr)
{
	struct net_device *netdev;
	u16 vlan_id = I40IW_NO_VLAN;

	netdev = ip_dev_find(&init_net, htonl(addr[0]));
	if (netdev) {
		vlan_id = rdma_vlan_dev_vlan_id(netdev);
		dev_put(netdev);
	}
	return vlan_id;
}
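/*
 * Usage sketch (hypothetical caller, not verbatim driver code): the CM
 * structures keep IPv4 addresses in host order, which is why the helper
 * converts back with htonl() before the netdev lookup:
 *
 *	cm_info->vlan_id = i40iw_get_vlan_ipv4(cm_info->loc_addr);
 */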
/**
 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv6 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
					      struct i40iw_cm_info *cm_info,
					      struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	enum i40iw_status_code ret = 0;
	struct i40iw_cm_listener *child_listen_node;
	unsigned long flags;

	rtnl_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("idev == NULL\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "IP=%pI6, vlan_id=%d, MAC=%pM\n",
					    &ifp->addr,
					    rdma_vlan_dev_vlan_id(ip_dev),
					    ip_dev->dev_addr);
				child_listen_node =
					kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;

				memcpy(child_listen_node, cm_parent_listen_node,
				       sizeof(*child_listen_node));

				i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
						    ifp->addr.in6_u.u6_addr32);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev, cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL, true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				} else {
					kfree(child_listen_node);
				}
			}
		}
	}
exit:
	rtnl_unlock();
	return ret;
}
/**
 * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv4 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_4(
					      struct i40iw_device *iwdev,
					      struct i40iw_cm_info *cm_info,
					      struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *dev;
	struct in_device *idev;
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = 0;
	unsigned long flags;

	rtnl_lock();
	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
					    &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev),
					    dev->dev_addr);
				child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
				cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					in_dev_put(idev);
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;

				memcpy(child_listen_node,
				       cm_parent_listen_node,
				       sizeof(*child_listen_node));

				child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev,
							 cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL,
							 true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
				} else {
					kfree(child_listen_node);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
				}
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
exit:
	rtnl_unlock();
	return ret;
}
/**
 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @free_hanging_nodes: to free associated cm_nodes
 * @apbvt_del: flag to delete the apbvt
 */
static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
				   struct i40iw_cm_listener *listener,
				   int free_hanging_nodes, bool apbvt_del)
{
	int ret = -EINVAL;
	int err = 0;
	struct list_head *list_pos;
	struct list_head *list_temp;
	struct i40iw_cm_node *cm_node;
	struct list_head reset_list;
	struct i40iw_cm_info nfo;
	struct i40iw_cm_node *loopback;
	enum i40iw_cm_node_state old_state;
	unsigned long flags;

	/* free non-accelerated child nodes for this listener */
	INIT_LIST_HEAD(&reset_list);
	if (free_hanging_nodes) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		list_for_each_safe(list_pos,
				   list_temp, &cm_core->non_accelerated_list) {
			cm_node = container_of(list_pos, struct i40iw_cm_node, list);
			if ((cm_node->listener == listener) &&
			    !cm_node->accelerated) {
				atomic_inc(&cm_node->ref_count);
				list_add(&cm_node->reset_entry, &reset_list);
			}
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}

	list_for_each_safe(list_pos, list_temp, &reset_list) {
		cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
		loopback = cm_node->loopbackpartner;
		if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (!loopback) {
				i40iw_cleanup_retrans_entry(cm_node);
				err = i40iw_send_reset(cm_node);
				if (err) {
					cm_node->state = I40IW_CM_STATE_CLOSED;
					i40iw_pr_err("send reset\n");
				} else {
					old_state = cm_node->state;
					cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
					if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
						i40iw_rem_ref_cm_node(cm_node);
				}
			} else {
				struct i40iw_cm_event event;

				event.cm_node = loopback;
				memcpy(event.cm_info.rem_addr,
				       loopback->rem_addr, sizeof(event.cm_info.rem_addr));
				memcpy(event.cm_info.loc_addr,
				       loopback->loc_addr, sizeof(event.cm_info.loc_addr));
				event.cm_info.rem_port = loopback->rem_port;
				event.cm_info.loc_port = loopback->loc_port;
				event.cm_info.cm_id = loopback->cm_id;
				event.cm_info.ipv4 = loopback->ipv4;
				atomic_inc(&loopback->ref_count);
				loopback->state = I40IW_CM_STATE_CLOSED;
				i40iw_event_connect_error(&event);
				cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
				i40iw_rem_ref_cm_node(cm_node);
			}
		}
	}

	if (!atomic_dec_return(&listener->ref_count)) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_del(&listener->list);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

		if (listener->iwdev) {
			if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
				i40iw_manage_apbvt(listener->iwdev,
						   listener->loc_port,
						   I40IW_MANAGE_APBVT_DEL);

			memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
			nfo.loc_port = listener->loc_port;
			nfo.ipv4 = listener->ipv4;
			nfo.vlan_id = listener->vlan_id;
			nfo.user_pri = listener->user_pri;

			if (!list_empty(&listener->child_listen_list)) {
				i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
			} else {
				if (listener->qhash_set)
					i40iw_manage_qhash(listener->iwdev,
							   &nfo,
							   I40IW_QHASH_TYPE_TCP_SYN,
							   I40IW_QHASH_MANAGE_TYPE_DELETE,
							   NULL,
							   false);
			}
		}

		cm_core->stats_listen_destroyed++;
		kfree(listener);
		cm_core->stats_listen_nodes_destroyed++;
		listener = NULL;
		ret = 0;
	}

	if (listener) {
		if (atomic_read(&listener->pend_accepts_cnt) > 0)
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s: listener (%p) pending accepts=%u\n",
				    __func__,
				    listener,
				    atomic_read(&listener->pend_accepts_cnt));
	}

	return ret;
}
/**
 * i40iw_cm_del_listen - delete a listener
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @apbvt_del: flag to delete apbvt
 */
static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_listener *listener,
			       bool apbvt_del)
{
	listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
	listener->cm_id = NULL;	/* going to be destroyed pretty soon */
	return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
}
/**
 * i40iw_addr_resolve_neigh - resolve neighbor address
 * @iwdev: iwarp device structure
 * @src_ip: local ip address
 * @dst_ip: remote ip address
 * @arpindex: if there is an arp entry
 */
static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
				    u32 src_ip,
				    u32 dst_ip,
				    int arpindex)
{
	struct rtable *rt;
	struct neighbour *neigh;
	int rc = arpindex;
	struct net_device *netdev = iwdev->netdev;
	__be32 dst_ipaddr = htonl(dst_ip);
	__be32 src_ipaddr = htonl(src_ip);

	rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
	if (IS_ERR(rt)) {
		i40iw_pr_err("ip_route_output\n");
		return rc;
	}

	if (netif_is_bond_slave(netdev))
		netdev = netdev_master_upper_dev_get(netdev);

	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);

	rcu_read_lock();
	if (neigh) {
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
						     neigh->ha))
					/* Mac address same as arp table */
					goto resolve_neigh_exit;
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       &dst_ip,
						       true,
						       I40IW_ARP_DELETE);
			}

			i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
		} else {
			neigh_event_send(neigh, NULL);
		}
	}
resolve_neigh_exit:
	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);

	ip_rt_put(rt);
	return rc;
}
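/*
 * Resolution flow summary (descriptive only): the route lookup yields a
 * dst entry, dst_neigh_lookup() yields the neighbour, and a NUD_VALID
 * neighbour refreshes the driver ARP cache (deleting a stale MAC first if
 * the cached entry no longer matches).  If the neighbour is not yet
 * valid, neigh_event_send() kicks kernel address resolution and the
 * caller proceeds with the old arpindex until a later retry.
 */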
/**
 * i40iw_get_dst_ipv6 - get destination cache entry for an IPv6 pair
 * @src_addr: local IPv6 address
 * @dst_addr: remote IPv6 address
 */
static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
					    struct sockaddr_in6 *dst_addr)
{
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	fl6.saddr = src_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	dst = ip6_route_output(&init_net, NULL, &fl6);
	return dst;
}
/**
 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @iwdev: iwarp device structure
 * @src: local ipv6 address
 * @dest: remote ipv6 address
 * @arpindex: if there is an arp entry
 */
static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
					 u32 *src,
					 u32 *dest,
					 int arpindex)
{
	struct neighbour *neigh;
	int rc = arpindex;
	struct net_device *netdev = iwdev->netdev;
	struct dst_entry *dst;
	struct sockaddr_in6 dst_addr;
	struct sockaddr_in6 src_addr;

	memset(&dst_addr, 0, sizeof(dst_addr));
	dst_addr.sin6_family = AF_INET6;
	i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
	memset(&src_addr, 0, sizeof(src_addr));
	src_addr.sin6_family = AF_INET6;
	i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
	dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
	if (!dst || dst->error) {
		if (dst) {
			i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
				     dst->error);
			dst_release(dst);
		}
		return rc;
	}

	if (netif_is_bond_slave(netdev))
		netdev = netdev_master_upper_dev_get(netdev);

	neigh = dst_neigh_lookup(dst, &dst_addr);

	rcu_read_lock();
	if (neigh) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal
				    (iwdev->arp_table[arpindex].mac_addr,
				     neigh->ha)) {
					/* Mac address same as in arp table */
					goto resolve_neigh_exit6;
				}
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       dest,
						       false,
						       I40IW_ARP_DELETE);
			}
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       dest,
					       false,
					       I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev,
					     dest,
					     false,
					     NULL,
					     I40IW_ARP_RESOLVE);
		} else {
			neigh_event_send(neigh, NULL);
		}
	}

resolve_neigh_exit6:
	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);
	dst_release(dst);
	return rc;
}
/**
 * i40iw_ipv4_is_loopback - check if loopback
 * @loc_addr: local addr to compare
 * @rem_addr: remote address
 */
static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
{
	return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
}
/**
 * i40iw_ipv6_is_loopback - check if loopback
 * @loc_addr: local addr to compare
 * @rem_addr: remote address
 */
static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
{
	struct in6_addr raddr6;

	i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
	return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
}
/**
 * i40iw_make_cm_node - create a new instance of a cm node
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 * @listener: passive connection's listener
 */
static struct i40iw_cm_node *i40iw_make_cm_node(
						struct i40iw_cm_core *cm_core,
						struct i40iw_device *iwdev,
						struct i40iw_cm_info *cm_info,
						struct i40iw_cm_listener *listener)
{
	struct i40iw_cm_node *cm_node;
	struct timespec ts;
	int oldarpindex;
	int arpindex;
	struct net_device *netdev = iwdev->netdev;

	/* create an hte and cm_node for this instance */
	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
	if (!cm_node)
		return NULL;

	/* set our node specific transport info */
	cm_node->ipv4 = cm_info->ipv4;
	cm_node->vlan_id = cm_info->vlan_id;
	if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
		cm_node->vlan_id = 0;
	cm_node->tos = cm_info->tos;
	cm_node->user_pri = cm_info->user_pri;
	if (listener) {
		if (listener->tos != cm_info->tos)
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
				    "application TOS[%d] and remote client TOS[%d] mismatch\n",
				    listener->tos, cm_info->tos);
		cm_node->tos = max(listener->tos, cm_info->tos);
		cm_node->user_pri = rt_tos2priority(cm_node->tos);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
			    cm_node->tos, cm_node->user_pri);
	}
	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
	cm_node->loc_port = cm_info->loc_port;
	cm_node->rem_port = cm_info->rem_port;

	cm_node->mpa_frame_rev = iwdev->mpa_version;
	cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
	cm_node->ird_size = I40IW_MAX_IRD_SIZE;
	cm_node->ord_size = I40IW_MAX_ORD_SIZE;

	cm_node->listener = listener;
	cm_node->cm_id = cm_info->cm_id;
	ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
	spin_lock_init(&cm_node->retrans_list_lock);
	cm_node->ack_rcvd = false;

	atomic_set(&cm_node->ref_count, 1);
	/* associate our parent CM core */
	cm_node->cm_core = cm_core;
	cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
	cm_node->tcp_cntxt.rcv_wnd =
		I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
	ts = current_kernel_time();
	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
	cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4) :
				 (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6);
	cm_node->iwdev = iwdev;
	cm_node->dev = &iwdev->sc_dev;

	if ((cm_node->ipv4 &&
	     i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
						      cm_node->rem_addr))) {
		arpindex = i40iw_arp_table(iwdev,
					   cm_node->rem_addr,
					   false,
					   NULL,
					   I40IW_ARP_RESOLVE);
	} else {
		oldarpindex = i40iw_arp_table(iwdev,
					      cm_node->rem_addr,
					      false,
					      NULL,
					      I40IW_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = i40iw_addr_resolve_neigh(iwdev,
							    cm_info->loc_addr[0],
							    cm_info->rem_addr[0],
							    oldarpindex);
		else if (IS_ENABLED(CONFIG_IPV6))
			arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
								 cm_info->loc_addr,
								 cm_info->rem_addr,
								 oldarpindex);
		else
			arpindex = -EINVAL;
	}
	if (arpindex < 0) {
		i40iw_pr_err("cm_node arpindex\n");
		kfree(cm_node);
		return NULL;
	}
	ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
	i40iw_add_hte_node(cm_core, cm_node);
	cm_core->stats_nodes_created++;
	return cm_node;
}
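/*
 * MSS derivation note (assumption: the I40IW_MTU_TO_MSS_* constants
 * account for the fixed IP plus TCP header overhead of each family): for
 * a 1500-byte IPv4 MTU the context MSS above comes out to
 * 1500 - I40IW_MTU_TO_MSS_IPV4, and the IPv6 constant is larger because
 * the base IPv6 header is bigger than the IPv4 one.
 */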
/**
 * i40iw_rem_ref_cm_node - destroy an instance of a cm node
 * @cm_node: connection's node
 */
static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
{
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_info nfo;
	unsigned long flags;

	spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
	if (atomic_dec_return(&cm_node->ref_count)) {
		spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
		return;
	}
	list_del(&cm_node->list);
	spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);

	/* if the node is destroyed before connection was accelerated */
	if (!cm_node->accelerated && cm_node->accept_pend) {
		pr_err("node destroyed before established\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
	}
	if (cm_node->close_entry)
		i40iw_handle_close_entry(cm_node, 0);
	if (cm_node->listener) {
		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
	} else {
		if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
			i40iw_manage_apbvt(cm_node->iwdev,
					   cm_node->loc_port,
					   I40IW_MANAGE_APBVT_DEL);
			cm_node->apbvt_set = 0;
		}
		i40iw_get_addr_info(cm_node, &nfo);
		if (cm_node->qhash_set) {
			i40iw_manage_qhash(cm_node->iwdev,
					   &nfo,
					   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					   I40IW_QHASH_MANAGE_TYPE_DELETE,
					   NULL,
					   false);
			cm_node->qhash_set = 0;
		}
	}

	iwqp = cm_node->iwqp;
	if (iwqp) {
		iwqp->cm_node = NULL;
		i40iw_rem_ref(&iwqp->ibqp);
		cm_node->iwqp = NULL;
	} else if (cm_node->qhash_set) {
		i40iw_get_addr_info(cm_node, &nfo);
		i40iw_manage_qhash(cm_node->iwdev,
				   &nfo,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);
		cm_node->qhash_set = 0;
	}

	cm_node->cm_core->stats_nodes_destroyed++;
	kfree(cm_node);
}
/**
 * i40iw_handle_fin_pkt - FIN packet received
 * @cm_node: connection's node
 */
static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
{
	u32 ret;

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_MPAREJ_RCVD:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSING;
		i40iw_send_ack(cm_node);
		/*
		 * Wait for ACK as this is simultaneous close.
		 * After we receive ACK, do not send anything.
		 * Just rm the node.
		 */
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_TIME_WAIT;
		i40iw_send_ack(cm_node);
		ret =
		    i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
		if (ret)
			i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
		break;
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
		break;
	}
}
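/*
 * State mapping used above, relative to the classic TCP close diagram
 * (descriptive only): ESTABLISHED and earlier states answer the peer's
 * FIN with our own FIN and jump straight to LAST_ACK, collapsing
 * CLOSE_WAIT; FIN_WAIT1 to CLOSING is the simultaneous-close path; and
 * FIN_WAIT2 to TIME_WAIT arms a close timer rather than lingering
 * indefinitely.
 */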
/**
 * i40iw_handle_rst_pkt - process received RST packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	i40iw_cleanup_retrans_entry(cm_node);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		switch (cm_node->mpa_frame_rev) {
		case IETF_MPA_V2:
			cm_node->mpa_frame_rev = IETF_MPA_V1;
			/* send a syn and goto syn sent state */
			cm_node->state = I40IW_CM_STATE_SYN_SENT;
			if (i40iw_send_syn(cm_node, 0))
				i40iw_active_open_err(cm_node, false);
			break;
		case IETF_MPA_V1:
		default:
			i40iw_active_open_err(cm_node, false);
			break;
		}
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		atomic_add_return(1, &cm_node->passive_state);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_LISTENING:
		i40iw_pr_err("Bad state, state = %d\n", cm_node->state);
		i40iw_passive_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		i40iw_active_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_CLOSED:
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_LAST_ACK:
		cm_node->cm_id->rem_ref(cm_node->cm_id);
		/* fall through */
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	default:
		break;
	}
}
/**
 * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	int ret;
	int datasize = rbuf->datalen;
	u8 *dataloc = rbuf->data;

	enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
	u32 res_type;

	ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
	if (ret) {
		if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
			i40iw_active_open_err(cm_node, true);
		else
			i40iw_passive_open_err(cm_node, true);
		return;
	}

	switch (cm_node->state) {
	case I40IW_CM_STATE_ESTABLISHED:
		if (res_type == I40IW_MPA_REQUEST_REJECT)
			i40iw_pr_err("state for reject\n");
		cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
		type = I40IW_CM_EVENT_MPA_REQ;
		i40iw_send_ack(cm_node);	/* ACK received MPA request */
		atomic_set(&cm_node->passive_state,
			   I40IW_PASSIVE_STATE_INDICATED);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		if (res_type == I40IW_MPA_REQUEST_REJECT) {
			type = I40IW_CM_EVENT_MPA_REJECT;
			cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
		} else {
			type = I40IW_CM_EVENT_CONNECTED;
			cm_node->state = I40IW_CM_STATE_OFFLOADED;
		}
		i40iw_send_ack(cm_node);
		break;
	default:
		pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
		break;
	}
	i40iw_create_event(cm_node, type);
}
/**
 * i40iw_indicate_pkt_err - Send up err event to cm
 * @cm_node: connection's node
 */
static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
{
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_active_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		break;
	}
}
/**
 * i40iw_check_syn - Check for error on received syn ack
 * @cm_node: connection's node
 * @tcph: pointer tcp header
 */
static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
{
	int err = 0;

	if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
		err = 1;
		i40iw_active_open_err(cm_node, true);
	}
	return err;
}
/**
 * i40iw_check_seq - check seq numbers if OK
 * @cm_node: connection's node
 * @tcph: pointer tcp header
 */
static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
{
	int err = 0;
	u32 seq;
	u32 ack_seq;
	u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
	u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
	u32 rcv_wnd;

	seq = ntohl(tcph->seq);
	ack_seq = ntohl(tcph->ack_seq);
	rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
	if (ack_seq != loc_seq_num)
		err = -1;
	else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
		err = -1;
	if (err) {
		i40iw_pr_err("seq number\n");
		i40iw_indicate_pkt_err(cm_node);
	}
	return err;
}
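/*
 * Worked example (illustrative): between(seq, rcv_nxt, rcv_nxt + rcv_wnd)
 * is the wraparound-safe form of rcv_nxt <= seq <= rcv_nxt + rcv_wnd,
 * i.e. (seq - rcv_nxt) <= rcv_wnd in modular u32 arithmetic.  With
 * rcv_nxt = 0xfffffff0 and rcv_wnd = 0x100, a post-wrap seq = 0x00000010
 * is still accepted because 0x10 - 0xfffffff0 == 0x20, which is <= 0x100.
 */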
/**
 * i40iw_handle_syn_pkt - is for Passive node
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;
	struct i40iw_cm_info nfo;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		/* Rcvd syn on active open connection */
		i40iw_active_open_err(cm_node, 1);
		break;
	case I40IW_CM_STATE_LISTENING:
		/* Passive OPEN */
		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
		    cm_node->listener->backlog) {
			cm_node->cm_core->stats_backlog_drops++;
			i40iw_passive_open_err(cm_node, false);
			break;
		}
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret) {
			i40iw_passive_open_err(cm_node, false);
			/* drop pkt */
			break;
		}
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		cm_node->accept_pend = 1;
		atomic_inc(&cm_node->listener->pend_accepts_cnt);

		cm_node->state = I40IW_CM_STATE_SYN_RCVD;
		i40iw_get_addr_info(cm_node, &nfo);
		ret = i40iw_manage_qhash(cm_node->iwdev,
					 &nfo,
					 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					 I40IW_QHASH_MANAGE_TYPE_ADD,
					 (void *)cm_node,
					 false);
		cm_node->qhash_set = true;
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		break;
	}
}
/**
 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
				    struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		/* active open */
		if (i40iw_check_syn(cm_node, tcph)) {
			i40iw_pr_err("check syn fail\n");
			return;
		}
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		/* setup options */
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p tcp_options failed\n",
				    cm_node);
			break;
		}
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		i40iw_send_ack(cm_node);	/* ACK for the syn_ack */
		ret = i40iw_send_mpa_request(cm_node);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p i40iw_send_mpa_request failed\n",
				    cm_node);
			break;
		}
		cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_LISTENING:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_MPAREQ_SENT:
	default:
		break;
	}
}
/**
 * i40iw_handle_ack_pkt - process packet with ACK
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
				struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 inc_sequence;
	int ret = 0;
	int optionsize;
	u32 datasize = rbuf->datalen;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);

	if (i40iw_check_seq(cm_node, tcph))
		return -EINVAL;

	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
		i40iw_cleanup_retrans_entry(cm_node);
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret)
			break;
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		cm_node->state = I40IW_CM_STATE_ESTABLISHED;
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_ESTABLISHED:
		i40iw_cleanup_retrans_entry(cm_node);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			cm_node->ack_rcvd = false;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		} else {
			cm_node->ack_rcvd = true;
		}
		break;
	case I40IW_CM_STATE_LISTENING:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		if (!cm_node->accept_pend)
			cm_node->cm_id->rem_ref(cm_node->cm_id);
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
		break;
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		i40iw_cleanup_retrans_entry(cm_node);
		break;
	}
	return ret;
}
/**
 * i40iw_process_packet - process cm packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 fin_set = 0;
	int ret;

	if (tcph->rst) {
		pkt_type = I40IW_PKT_TYPE_RST;
	} else if (tcph->syn) {
		pkt_type = I40IW_PKT_TYPE_SYN;
		if (tcph->ack)
			pkt_type = I40IW_PKT_TYPE_SYNACK;
	} else if (tcph->ack) {
		pkt_type = I40IW_PKT_TYPE_ACK;
	}
	if (tcph->fin)
		fin_set = 1;

	switch (pkt_type) {
	case I40IW_PKT_TYPE_SYN:
		i40iw_handle_syn_pkt(cm_node, rbuf);
		break;
	case I40IW_PKT_TYPE_SYNACK:
		i40iw_handle_synack_pkt(cm_node, rbuf);
		break;
	case I40IW_PKT_TYPE_ACK:
		ret = i40iw_handle_ack_pkt(cm_node, rbuf);
		if (fin_set && !ret)
			i40iw_handle_fin_pkt(cm_node);
		break;
	case I40IW_PKT_TYPE_RST:
		i40iw_handle_rst_pkt(cm_node, rbuf);
		break;
	default:
		if (fin_set &&
		    (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
			i40iw_handle_fin_pkt(cm_node);
		break;
	}
}
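/*
 * Flag-to-type mapping used above (RST wins, then SYN/SYNACK, then ACK;
 * FIN is tracked separately and only honored after a valid ACK or a
 * passing sequence check):
 *
 *	rst = 1			-> I40IW_PKT_TYPE_RST
 *	syn = 1, ack = 0	-> I40IW_PKT_TYPE_SYN
 *	syn = 1, ack = 1	-> I40IW_PKT_TYPE_SYNACK
 *	syn = 0, ack = 1	-> I40IW_PKT_TYPE_ACK
 */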
/**
 * i40iw_make_listen_node - create a listen node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 */
static struct i40iw_cm_listener *i40iw_make_listen_node(
							struct i40iw_cm_core *cm_core,
							struct i40iw_device *iwdev,
							struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_listener *listener;
	unsigned long flags;

	/* cannot have multiple matching listeners */
	listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
				       cm_info->loc_port,
				       cm_info->vlan_id,
				       I40IW_CM_LISTENER_EITHER_STATE);
	if (listener &&
	    (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
		atomic_dec(&listener->ref_count);
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "Not creating listener since it already exists\n");
		return NULL;
	}

	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
		if (!listener)
			return NULL;
		cm_core->stats_listen_nodes_created++;
		memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
		listener->loc_port = cm_info->loc_port;

		INIT_LIST_HEAD(&listener->child_listen_list);

		atomic_set(&listener->ref_count, 1);
	} else {
		listener->reused_node = 1;
	}

	listener->cm_id = cm_info->cm_id;
	listener->ipv4 = cm_info->ipv4;
	listener->vlan_id = cm_info->vlan_id;
	atomic_set(&listener->pend_accepts_cnt, 0);
	listener->cm_core = cm_core;
	listener->iwdev = iwdev;

	listener->backlog = cm_info->backlog;
	listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;

	if (!listener->reused_node) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_add(&listener->list, &cm_core->listen_nodes);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}

	return listener;
}
/**
 * i40iw_create_cm_node - make a connection node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @conn_param: upper layer connection parameters
 * @cm_info: quad info for connection
 */
static struct i40iw_cm_node *i40iw_create_cm_node(
					struct i40iw_cm_core *cm_core,
					struct i40iw_device *iwdev,
					struct iw_cm_conn_param *conn_param,
					struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *loopback_remotelistener;
	struct i40iw_cm_node *loopback_remotenode;
	struct i40iw_cm_info loopback_cm_info;
	u16 private_data_len = conn_param->private_data_len;
	const void *private_data = conn_param->private_data;

	/* create a CM connection node */
	cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
	if (!cm_node)
		return ERR_PTR(-ENOMEM);
	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;

	i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);

	if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
		loopback_remotelistener = i40iw_find_listener(
						cm_core,
						cm_info->rem_addr,
						cm_node->rem_port,
						cm_node->vlan_id,
						I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!loopback_remotelistener) {
			i40iw_rem_ref_cm_node(cm_node);
			return ERR_PTR(-ECONNREFUSED);
		} else {
			loopback_cm_info = *cm_info;
			loopback_cm_info.loc_port = cm_info->rem_port;
			loopback_cm_info.rem_port = cm_info->loc_port;
			loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
			loopback_cm_info.ipv4 = cm_info->ipv4;
			loopback_remotenode = i40iw_make_cm_node(cm_core,
								 iwdev,
								 &loopback_cm_info,
								 loopback_remotelistener);
			if (!loopback_remotenode) {
				i40iw_rem_ref_cm_node(cm_node);
				return ERR_PTR(-ENOMEM);
			}
			cm_core->stats_loopbacks++;
			loopback_remotenode->loopbackpartner = cm_node;
			loopback_remotenode->tcp_cntxt.rcv_wscale =
				I40IW_CM_DEFAULT_RCV_WND_SCALE;
			cm_node->loopbackpartner = loopback_remotenode;
			memcpy(loopback_remotenode->pdata_buf, private_data,
			       private_data_len);
			loopback_remotenode->pdata.size = private_data_len;

			if (loopback_remotenode->ord_size > cm_node->ird_size)
				loopback_remotenode->ord_size =
					cm_node->ird_size;

			cm_node->state = I40IW_CM_STATE_OFFLOADED;
			cm_node->tcp_cntxt.rcv_nxt =
				loopback_remotenode->tcp_cntxt.loc_seq_num;
			loopback_remotenode->tcp_cntxt.rcv_nxt =
				cm_node->tcp_cntxt.loc_seq_num;
			cm_node->tcp_cntxt.max_snd_wnd =
				loopback_remotenode->tcp_cntxt.rcv_wnd;
			loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
			loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
			loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
		}
		return cm_node;
	}

	cm_node->pdata.size = private_data_len;
	cm_node->pdata.addr = cm_node->pdata_buf;

	memcpy(cm_node->pdata_buf, private_data, private_data_len);

	cm_node->state = I40IW_CM_STATE_SYN_SENT;
	return cm_node;
}
/**
 * i40iw_cm_reject - reject and teardown a connection
 * @cm_node: connection's node
 * @pdata: ptr to private data for reject
 * @plen: size of private data
 */
static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
{
        int ret = 0;
        int err;
        int passive_state;
        struct iw_cm_id *cm_id = cm_node->cm_id;
        struct i40iw_cm_node *loopback = cm_node->loopbackpartner;

        if (cm_node->tcp_cntxt.client)
                return ret;
        i40iw_cleanup_retrans_entry(cm_node);

        if (!loopback) {
                passive_state = atomic_add_return(1, &cm_node->passive_state);
                if (passive_state == I40IW_SEND_RESET_EVENT) {
                        cm_node->state = I40IW_CM_STATE_CLOSED;
                        i40iw_rem_ref_cm_node(cm_node);
                } else {
                        if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
                                i40iw_rem_ref_cm_node(cm_node);
                        } else {
                                ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
                                if (ret) {
                                        cm_node->state = I40IW_CM_STATE_CLOSED;
                                        err = i40iw_send_reset(cm_node);
                                        if (err)
                                                i40iw_pr_err("send reset failed\n");
                                } else {
                                        cm_id->add_ref(cm_id);
                                }
                        }
                }
        } else {
                cm_node->cm_id = NULL;
                if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
                        i40iw_rem_ref_cm_node(cm_node);
                        i40iw_rem_ref_cm_node(loopback);
                } else {
                        ret = i40iw_send_cm_event(loopback,
                                                  loopback->cm_id,
                                                  IW_CM_EVENT_CONNECT_REPLY,
                                                  -ECONNREFUSED);
                        i40iw_rem_ref_cm_node(cm_node);
                        loopback->state = I40IW_CM_STATE_CLOSING;

                        cm_id = loopback->cm_id;
                        i40iw_rem_ref_cm_node(loopback);
                        cm_id->rem_ref(cm_id);
                }
        }

        return ret;
}
/**
 * i40iw_cm_close - close of cm connection
 * @cm_node: connection's node
 */
static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
{
        int ret = 0;

        if (!cm_node)
                return -EINVAL;

        switch (cm_node->state) {
        case I40IW_CM_STATE_SYN_RCVD:
        case I40IW_CM_STATE_SYN_SENT:
        case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
        case I40IW_CM_STATE_ESTABLISHED:
        case I40IW_CM_STATE_ACCEPTING:
        case I40IW_CM_STATE_MPAREQ_SENT:
        case I40IW_CM_STATE_MPAREQ_RCVD:
                i40iw_cleanup_retrans_entry(cm_node);
                i40iw_send_reset(cm_node);
                break;
        case I40IW_CM_STATE_CLOSE_WAIT:
                cm_node->state = I40IW_CM_STATE_LAST_ACK;
                i40iw_send_fin(cm_node);
                break;
        case I40IW_CM_STATE_FIN_WAIT1:
        case I40IW_CM_STATE_FIN_WAIT2:
        case I40IW_CM_STATE_LAST_ACK:
        case I40IW_CM_STATE_TIME_WAIT:
        case I40IW_CM_STATE_CLOSING:
                ret = -1;
                break;
        case I40IW_CM_STATE_LISTENING:
                i40iw_cleanup_retrans_entry(cm_node);
                i40iw_send_reset(cm_node);
                break;
        case I40IW_CM_STATE_MPAREJ_RCVD:
        case I40IW_CM_STATE_UNKNOWN:
        case I40IW_CM_STATE_INITED:
        case I40IW_CM_STATE_CLOSED:
        case I40IW_CM_STATE_LISTENER_DESTROYED:
                i40iw_rem_ref_cm_node(cm_node);
                break;
        case I40IW_CM_STATE_OFFLOADED:
                if (cm_node->send_entry)
                        i40iw_pr_err("send_entry\n");
                i40iw_rem_ref_cm_node(cm_node);
                break;
        }
        return ret;
}
/**
 * i40iw_receive_ilq - receive an Ethernet packet and process it
 * through CM
 * @vsi: pointer to the vsi structure
 * @rbuf: receive buffer
 */
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
{
        struct i40iw_cm_node *cm_node;
        struct i40iw_cm_listener *listener;
        struct iphdr *iph;
        struct ipv6hdr *ip6h;
        struct tcphdr *tcph;
        struct i40iw_cm_info cm_info;
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
        struct vlan_ethhdr *ethh;
        u16 vtag;

        /* if vlan, then maclen = 18 else 14 */
        iph = (struct iphdr *)rbuf->iph;
        memset(&cm_info, 0, sizeof(cm_info));

        i40iw_debug_buf(dev,
                        I40IW_DEBUG_ILQ,
                        "RECEIVE ILQ BUFFER",
                        rbuf->mem.va,
                        rbuf->totallen);
        ethh = (struct vlan_ethhdr *)rbuf->mem.va;

        if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
                vtag = ntohs(ethh->h_vlan_TCI);
                cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                cm_info.vlan_id = vtag & VLAN_VID_MASK;
                i40iw_debug(cm_core->dev,
                            I40IW_DEBUG_CM,
                            "%s vlan_id=%d\n",
                            __func__,
                            cm_info.vlan_id);
        } else {
                cm_info.vlan_id = I40IW_NO_VLAN;
        }
        tcph = (struct tcphdr *)rbuf->tcph;

        if (rbuf->ipv4) {
                cm_info.loc_addr[0] = ntohl(iph->daddr);
                cm_info.rem_addr[0] = ntohl(iph->saddr);
                cm_info.ipv4 = true;
                cm_info.tos = iph->tos;
        } else {
                ip6h = (struct ipv6hdr *)rbuf->iph;
                i40iw_copy_ip_ntohl(cm_info.loc_addr,
                                    ip6h->daddr.in6_u.u6_addr32);
                i40iw_copy_ip_ntohl(cm_info.rem_addr,
                                    ip6h->saddr.in6_u.u6_addr32);
                cm_info.ipv4 = false;
                cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
        }
        cm_info.loc_port = ntohs(tcph->dest);
        cm_info.rem_port = ntohs(tcph->source);
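        /*
         * Look up an existing connection by its TCP quad (remote and
         * local address/port). If none exists, only a bare SYN may
         * create one: it is matched against an active listener and
         * spawns a new passive-side node.
         */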
        cm_node = i40iw_find_node(cm_core,
                                  cm_info.rem_port,
                                  cm_info.rem_addr,
                                  cm_info.loc_port,
                                  cm_info.loc_addr,
                                  true,
                                  false);

        if (!cm_node) {
                /* the only packet accepted here is a SYN for a passive open */
                if (!tcph->syn || tcph->ack)
                        return;
                listener =
                        i40iw_find_listener(cm_core,
                                            cm_info.loc_addr,
                                            cm_info.loc_port,
                                            cm_info.vlan_id,
                                            I40IW_CM_LISTENER_ACTIVE_STATE);
                if (!listener) {
                        cm_info.cm_id = NULL;
                        i40iw_debug(cm_core->dev,
                                    I40IW_DEBUG_CM,
                                    "%s no listener found\n",
                                    __func__);
                        return;
                }
                cm_info.cm_id = listener->cm_id;
                cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
                if (!cm_node) {
                        i40iw_debug(cm_core->dev,
                                    I40IW_DEBUG_CM,
                                    "%s allocate node failed\n",
                                    __func__);
                        atomic_dec(&listener->ref_count);
                        return;
                }
                if (!tcph->rst && !tcph->fin) {
                        cm_node->state = I40IW_CM_STATE_LISTENING;
                } else {
                        i40iw_rem_ref_cm_node(cm_node);
                        return;
                }
                atomic_inc(&cm_node->ref_count);
        } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
                i40iw_rem_ref_cm_node(cm_node);
                return;
        }
        i40iw_process_packet(cm_node, rbuf);
        i40iw_rem_ref_cm_node(cm_node);
}
/**
 * i40iw_setup_cm_core - allocate a top level instance of a cm
 * core
 * @iwdev: iwarp device structure
 */
void i40iw_setup_cm_core(struct i40iw_device *iwdev)
{
        struct i40iw_cm_core *cm_core = &iwdev->cm_core;

        cm_core->iwdev = iwdev;
        cm_core->dev = &iwdev->sc_dev;

        INIT_LIST_HEAD(&cm_core->accelerated_list);
        INIT_LIST_HEAD(&cm_core->non_accelerated_list);
        INIT_LIST_HEAD(&cm_core->listen_nodes);

        timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);

        spin_lock_init(&cm_core->ht_lock);
        spin_lock_init(&cm_core->listen_list_lock);
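        /*
         * Both workqueues are ordered, so CM events and disconnect work
         * items run one at a time in submission order. Note that
         * alloc_ordered_workqueue() can return NULL under memory
         * pressure; the result is not checked here.
         */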
        cm_core->event_wq = alloc_ordered_workqueue("iwewq",
                                                    WQ_MEM_RECLAIM);
        cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
                                                      WQ_MEM_RECLAIM);
}
/**
 * i40iw_cleanup_cm_core - deallocate a top level instance of a
 * cm core
 * @cm_core: cm's core
 */
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
{
        unsigned long flags;

        if (!cm_core)
                return;

        spin_lock_irqsave(&cm_core->ht_lock, flags);
        if (timer_pending(&cm_core->tcp_timer))
                del_timer_sync(&cm_core->tcp_timer);
        spin_unlock_irqrestore(&cm_core->ht_lock, flags);

        destroy_workqueue(cm_core->event_wq);
        destroy_workqueue(cm_core->disconn_wq);
}
/**
 * i40iw_init_tcp_ctx - setup qp context
 * @cm_node: connection's node
 * @tcp_info: offload info for tcp
 * @iwqp: associate qp for the connection
 */
static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
                               struct i40iw_tcp_offload_info *tcp_info,
                               struct i40iw_qp *iwqp)
{
        tcp_info->ipv4 = cm_node->ipv4;
        tcp_info->drop_ooo_seg = true;
        tcp_info->wscale = true;
        tcp_info->ignore_tcp_opt = true;
        tcp_info->ignore_tcp_uns_opt = true;
        tcp_info->no_nagle = false;

        tcp_info->ttl = I40IW_DEFAULT_TTL;
        tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
        tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
        tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;

        tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
        tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
        tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;

        tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
        tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
        tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
        tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);

        tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
        tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
        tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
        tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
        tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
        tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
                                        cm_node->tcp_cntxt.rcv_wscale);

        tcp_info->flow_label = 0;
        tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
        if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
                tcp_info->insert_vlan_tag = true;
                tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
                                                  cm_node->vlan_id);
        }
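        /*
         * Addresses and ports are programmed per IP version below; in
         * both branches arp_idx records which ARP table entry was
         * resolved for the destination IP, presumably so the hardware
         * can source the destination MAC from that entry.
         */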
        if (cm_node->ipv4) {
                tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
                tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);

                tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
                tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
                tcp_info->arp_idx =
                        cpu_to_le16((u16)i40iw_arp_table(
                                                iwqp->iwdev,
                                                &tcp_info->dest_ip_addr3,
                                                true,
                                                NULL,
                                                I40IW_ARP_RESOLVE));
        } else {
                tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
                tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
                tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
                tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
                tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
                tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
                tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
                tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
                tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
                tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
                tcp_info->arp_idx =
                        cpu_to_le16((u16)i40iw_arp_table(
                                                iwqp->iwdev,
                                                &tcp_info->dest_ip_addr0,
                                                false,
                                                NULL,
                                                I40IW_ARP_RESOLVE));
        }
}
/**
 * i40iw_cm_init_tsa_conn - setup qp for RTS
 * @iwqp: associate qp for the connection
 * @cm_node: connection's node
 */
static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
                                   struct i40iw_cm_node *cm_node)
{
        struct i40iw_tcp_offload_info tcp_info;
        struct i40iwarp_offload_info *iwarp_info;
        struct i40iw_qp_host_ctx_info *ctx_info;
        struct i40iw_device *iwdev = iwqp->iwdev;
        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;

        memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
        iwarp_info = &iwqp->iwarp_info;
        ctx_info = &iwqp->ctx_info;

        ctx_info->tcp_info = &tcp_info;
        ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
        ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;

        iwarp_info->ord_size = cm_node->ord_size;
        iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
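        /*
         * An ORD of 1 is bumped to 2 below before the context is
         * programmed; this looks like a minimum the hardware accepts
         * for outbound RDMA reads, though that rationale is an
         * assumption.
         */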
        if (iwarp_info->ord_size == 1)
                iwarp_info->ord_size = 2;

        iwarp_info->rd_enable = true;
        iwarp_info->rdmap_ver = 1;
        iwarp_info->ddp_ver = 1;

        iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;

        ctx_info->tcp_info_valid = true;
        ctx_info->iwarp_info_valid = true;
        ctx_info->add_to_qoslist = true;
        ctx_info->user_pri = cm_node->user_pri;

        i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
        if (cm_node->snd_mark_en) {
                iwarp_info->snd_mark_en = true;
                iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
                                               SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
        }

        cm_node->state = I40IW_CM_STATE_OFFLOADED;
        tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
        tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
        tcp_info.tos = cm_node->tos;

        dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);

        /* once tcp_info is set, no need to do it again */
        ctx_info->tcp_info_valid = false;
        ctx_info->iwarp_info_valid = false;
        ctx_info->add_to_qoslist = false;
}
/**
 * i40iw_cm_disconn - when a connection is being closed
 * @iwqp: associate qp for the connection
 */
void i40iw_cm_disconn(struct i40iw_qp *iwqp)
{
        struct disconn_work *work;
        struct i40iw_device *iwdev = iwqp->iwdev;
        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
        unsigned long flags;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return; /* Timer will clean up */

        spin_lock_irqsave(&iwdev->qptable_lock, flags);
        if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
                spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
                            "%s qp_id %d is already freed\n",
                            __func__, iwqp->ibqp.qp_num);
                kfree(work);
                return;
        }
        i40iw_add_ref(&iwqp->ibqp);
        spin_unlock_irqrestore(&iwdev->qptable_lock, flags);

        work->iwqp = iwqp;
        INIT_WORK(&work->work, i40iw_disconnect_worker);
        queue_work(cm_core->disconn_wq, &work->work);
}
/**
 * i40iw_qp_disconnect - free qp and close cm
 * @iwqp: associate qp for the connection
 */
static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
{
        struct i40iw_device *iwdev;
        struct i40iw_ib_device *iwibdev;

        iwdev = to_iwdev(iwqp->ibqp.device);
        if (!iwdev) {
                i40iw_pr_err("iwdev == NULL\n");
                return;
        }

        iwibdev = iwdev->iwibdev;

        if (iwqp->active_conn) {
                /* indicate this connection is NOT active */
                iwqp->active_conn = 0;
        } else {
                /* Need to free the Last Streaming Mode Message */
                if (iwqp->ietf_mem.va) {
                        if (iwqp->lsmm_mr)
                                iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
                        i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
                }
        }

        /* close the CM node down if it is still active */
        if (iwqp->cm_node) {
                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
                i40iw_cm_close(iwqp->cm_node);
        }
}
/**
 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
 * @iwqp: associate qp for the connection
 */
static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
{
        struct iw_cm_id *cm_id;
        struct i40iw_device *iwdev;
        struct i40iw_sc_qp *qp = &iwqp->sc_qp;
        u16 last_ae;
        u8 original_hw_tcp_state;
        u8 original_ibqp_state;
        int disconn_status = 0;
        int issue_disconn = 0;
        int issue_close = 0;
        int issue_flush = 0;
        struct ib_event ibevent;
        unsigned long flags;
        int ret;

        if (!iwqp) {
                i40iw_pr_err("iwqp == NULL\n");
                return;
        }

        spin_lock_irqsave(&iwqp->lock, flags);
        cm_id = iwqp->cm_id;
        /* make sure we haven't already closed this connection */
        if (!cm_id) {
                spin_unlock_irqrestore(&iwqp->lock, flags);
                return;
        }

        iwdev = to_iwdev(iwqp->ibqp.device);

        original_hw_tcp_state = iwqp->hw_tcp_state;
        original_ibqp_state = iwqp->ibqp_state;
        last_ae = iwqp->last_aeq;
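        /*
         * Decide, under the QP lock, which actions this disconnect
         * needs: issue_disconn (send IW_CM_EVENT_DISCONNECT),
         * issue_close (tear down the CM side and send IW_CM_EVENT_CLOSE)
         * and issue_flush (flush the QP's work queues). Terminate errors
         * trigger all three; a graceful peer close or a connection reset
         * triggers a subset.
         */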
        if (qp->term_flags) {
                issue_disconn = 1;
                issue_close = 1;
                iwqp->cm_id = NULL;
                /*
                 * When the term timer expires after cm_timer, don't want
                 * terminate-handler to issue cm_disconn which can re-free
                 * a QP even after its refcnt=0.
                 */
                i40iw_terminate_del_timer(qp);
                if (!iwqp->flush_issued) {
                        iwqp->flush_issued = 1;
                        issue_flush = 1;
                }
        } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
                   ((original_ibqp_state == IB_QPS_RTS) &&
                    (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
                issue_disconn = 1;
                if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
                        disconn_status = -ECONNRESET;
        }

        if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
             (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
             (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
             (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
              iwdev->reset)) {
                issue_close = 1;
                iwqp->cm_id = NULL;
                if (!iwqp->flush_issued) {
                        iwqp->flush_issued = 1;
                        issue_flush = 1;
                }
        }

        spin_unlock_irqrestore(&iwqp->lock, flags);
        if (issue_flush && !iwqp->destroyed) {
                /* Flush the queues */
                i40iw_flush_wqes(iwdev, iwqp);

                if (qp->term_flags && iwqp->ibqp.event_handler) {
                        ibevent.device = iwqp->ibqp.device;
                        ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
                                        IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
                        ibevent.element.qp = &iwqp->ibqp;
                        iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
                }
        }

        if (cm_id && cm_id->event_handler) {
                if (issue_disconn) {
                        ret = i40iw_send_cm_event(NULL,
                                                  cm_id,
                                                  IW_CM_EVENT_DISCONNECT,
                                                  disconn_status);

                        if (ret)
                                i40iw_debug(&iwdev->sc_dev,
                                            I40IW_DEBUG_CM,
                                            "disconnect event failed %s: - cm_id = %p\n",
                                            __func__, cm_id);
                }
                if (issue_close) {
                        i40iw_qp_disconnect(iwqp);
                        cm_id->provider_data = iwqp;
                        ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
                        if (ret)
                                i40iw_debug(&iwdev->sc_dev,
                                            I40IW_DEBUG_CM,
                                            "close event failed %s: - cm_id = %p\n",
                                            __func__, cm_id);
                        cm_id->rem_ref(cm_id);
                }
        }
}
/**
 * i40iw_disconnect_worker - worker for connection close
 * @work: pointer to disconn structure
 */
static void i40iw_disconnect_worker(struct work_struct *work)
{
        struct disconn_work *dwork = container_of(work, struct disconn_work, work);
        struct i40iw_qp *iwqp = dwork->iwqp;

        kfree(dwork);
        i40iw_cm_disconn_true(iwqp);
        i40iw_rem_ref(&iwqp->ibqp);
}
/**
 * i40iw_accept - registered call for connection to be accepted
 * @cm_id: cm information for passive connection
 * @conn_param: accept parameters
 */
int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        struct ib_qp *ibqp;
        struct i40iw_qp *iwqp;
        struct i40iw_device *iwdev;
        struct i40iw_sc_dev *dev;
        struct i40iw_cm_core *cm_core;
        struct i40iw_cm_node *cm_node;
        struct ib_qp_attr attr;
        int passive_state;
        struct ib_mr *ibmr;
        struct i40iw_pd *iwpd;
        u16 buf_len = 0;
        struct i40iw_kmem_info accept;
        enum i40iw_status_code status;
        u64 tagged_offset;
        unsigned long flags;

        memset(&attr, 0, sizeof(attr));
        ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
                return -EINVAL;

        iwqp = to_iwqp(ibqp);
        iwdev = iwqp->iwdev;
        dev = &iwdev->sc_dev;
        cm_core = &iwdev->cm_core;
        cm_node = (struct i40iw_cm_node *)cm_id->provider_data;

        if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
                cm_node->ipv4 = true;
                cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
        } else {
                cm_node->ipv4 = false;
                i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id);
        }
        i40iw_debug(cm_node->dev,
                    I40IW_DEBUG_CM,
                    "Accept vlan_id=%d\n",
                    cm_node->vlan_id);
        if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
                if (cm_node->loopbackpartner)
                        i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
                i40iw_rem_ref_cm_node(cm_node);
                return -EINVAL;
        }

        passive_state = atomic_add_return(1, &cm_node->passive_state);
        if (passive_state == I40IW_SEND_RESET_EVENT) {
                i40iw_rem_ref_cm_node(cm_node);
                return -ECONNRESET;
        }

        cm_node->cm_core->stats_accepts++;
        iwqp->cm_node = (void *)cm_node;
        cm_node->iwqp = iwqp;

        buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;

        status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
        if (status)
                return -ENOMEM;
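        /*
         * The DMA buffer just allocated is filled with the MPA reply
         * frame followed immediately by the upper layer's private data:
         *
         *   [ IETF MPA frame | private_data (private_data_len bytes) ]
         *
         * The whole region is then sent as the Last Streaming Mode
         * Message (LSMM).
         */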
        cm_node->pdata.size = conn_param->private_data_len;
        accept.addr = iwqp->ietf_mem.va;
        accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
        memcpy(accept.addr + accept.size, conn_param->private_data,
               conn_param->private_data_len);

        /* setup our first outgoing iWarp send WQE (the IETF frame response) */
        if ((cm_node->ipv4 &&
             !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
            (!cm_node->ipv4 &&
             !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
                iwpd = iwqp->iwpd;
                tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
                ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
                                         iwqp->ietf_mem.pa,
                                         buf_len,
                                         IB_ACCESS_LOCAL_WRITE,
                                         &tagged_offset);
                if (IS_ERR(ibmr)) {
                        i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
                        return -ENOMEM;
                }

                ibmr->pd = &iwpd->ibpd;
                ibmr->device = iwpd->ibpd.device;
                iwqp->lsmm_mr = ibmr;
                if (iwqp->page)
                        iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
                dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
                                                  iwqp->ietf_mem.va,
                                                  (accept.size + conn_param->private_data_len),
                                                  ibmr->lkey);
        } else {
                if (iwqp->page)
                        iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
                dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
        }

        if (iwqp->page)
                kunmap(iwqp->page);

        iwqp->cm_id = cm_id;
        cm_node->cm_id = cm_id;

        cm_id->provider_data = (void *)iwqp;
        iwqp->active_conn = 0;

        cm_node->lsmm_size = accept.size + conn_param->private_data_len;
        i40iw_cm_init_tsa_conn(iwqp, cm_node);
        cm_id->add_ref(cm_id);
        i40iw_add_ref(&iwqp->ibqp);

        attr.qp_state = IB_QPS_RTS;
        cm_node->qhash_set = false;
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

        cm_node->accelerated = true;
        spin_lock_irqsave(&cm_core->ht_lock, flags);
        list_move_tail(&cm_node->list, &cm_core->accelerated_list);
        spin_unlock_irqrestore(&cm_core->ht_lock, flags);

        status =
                i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
        if (status)
                i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");

        if (cm_node->loopbackpartner) {
                cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
                /* copy entire MPA frame to our cm_node's frame */
                memcpy(cm_node->loopbackpartner->pdata_buf,
                       conn_param->private_data,
                       conn_param->private_data_len);
                i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
        }

        if (cm_node->accept_pend) {
                atomic_dec(&cm_node->listener->pend_accepts_cnt);
                cm_node->accept_pend = 0;
        }
        return 0;
}
/**
 * i40iw_reject - registered call for connection to be rejected
 * @cm_id: cm information for passive connection
 * @pdata: private data to be sent
 * @pdata_len: private data length
 */
int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        struct i40iw_device *iwdev;
        struct i40iw_cm_node *cm_node;
        struct i40iw_cm_node *loopback;

        cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
        loopback = cm_node->loopbackpartner;
        cm_node->cm_id = cm_id;
        cm_node->pdata.size = pdata_len;

        iwdev = to_iwdev(cm_id->device);
        if (!iwdev)
                return -EINVAL;
        cm_node->cm_core->stats_rejects++;

        if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
                return -EINVAL;

        if (loopback) {
                memcpy(&loopback->pdata_buf, pdata, pdata_len);
                loopback->pdata.size = pdata_len;
        }

        return i40iw_cm_reject(cm_node, pdata, pdata_len);
}
/**
 * i40iw_connect - registered call for connection to be established
 * @cm_id: cm information for active connection
 * @conn_param: Information about the connection
 */
int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        struct ib_qp *ibqp;
        struct i40iw_qp *iwqp;
        struct i40iw_device *iwdev;
        struct i40iw_cm_node *cm_node;
        struct i40iw_cm_info cm_info;
        struct sockaddr_in *laddr;
        struct sockaddr_in *raddr;
        struct sockaddr_in6 *laddr6;
        struct sockaddr_in6 *raddr6;
        int ret = 0;
        unsigned long flags;

        ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
                return -EINVAL;
        iwqp = to_iwqp(ibqp);
        if (!iwqp)
                return -EINVAL;
        iwdev = to_iwdev(iwqp->ibqp.device);
        if (!iwdev)
                return -EINVAL;

        laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
        raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
        laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
        raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

        if (!(laddr->sin_port) || !(raddr->sin_port))
                return -EINVAL;

        iwqp->active_conn = 1;
        iwqp->cm_id = NULL;
        cm_id->provider_data = iwqp;

        /* set up the connection params for the node */
        if (cm_id->remote_addr.ss_family == AF_INET) {
                cm_info.ipv4 = true;
                memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
                memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
                cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
                cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
                cm_info.loc_port = ntohs(laddr->sin_port);
                cm_info.rem_port = ntohs(raddr->sin_port);
                cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
        } else {
                cm_info.ipv4 = false;
                i40iw_copy_ip_ntohl(cm_info.loc_addr,
                                    laddr6->sin6_addr.in6_u.u6_addr32);
                i40iw_copy_ip_ntohl(cm_info.rem_addr,
                                    raddr6->sin6_addr.in6_u.u6_addr32);
                cm_info.loc_port = ntohs(laddr6->sin6_port);
                cm_info.rem_port = ntohs(raddr6->sin6_port);
                i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id);
        }
        cm_info.cm_id = cm_id;
        cm_info.tos = cm_id->tos;
        cm_info.user_pri = rt_tos2priority(cm_id->tos);
        i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
                    __func__, cm_id->tos, cm_info.user_pri);
        cm_id->add_ref(cm_id);
        cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
                                       conn_param, &cm_info);

        if (IS_ERR(cm_node)) {
                ret = PTR_ERR(cm_node);
                cm_id->rem_ref(cm_id);
                return ret;
        }

        if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
            (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
                                     raddr6->sin6_addr.in6_u.u6_addr32,
                                     sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
                if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
                                       I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
                        ret = -EINVAL;
                        goto err;
                }
                cm_node->qhash_set = true;
        }
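        /*
         * Each active-side local port needs an APBVT entry exactly once;
         * the active_side_ports bitmap tracks which ports already have
         * one, so a second connect on the same port skips the add.
         */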
        spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
        if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
                spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
                if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
                        ret = -EINVAL;
                        goto err;
                }
        } else {
                spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
        }

        cm_node->apbvt_set = true;
        iwqp->cm_node = cm_node;
        cm_node->iwqp = iwqp;
        iwqp->cm_id = cm_id;
        i40iw_add_ref(&iwqp->ibqp);

        if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
                cm_node->state = I40IW_CM_STATE_SYN_SENT;
                ret = i40iw_send_syn(cm_node, 0);
                if (ret)
                        goto err;
        }

        if (cm_node->loopbackpartner) {
                cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
                i40iw_create_event(cm_node->loopbackpartner,
                                   I40IW_CM_EVENT_MPA_REQ);
        }

        i40iw_debug(cm_node->dev,
                    I40IW_DEBUG_CM,
                    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
                    cm_node->rem_port,
                    cm_node,
                    cm_node->cm_id);

        return 0;

err:
        if (cm_info.ipv4)
                i40iw_debug(&iwdev->sc_dev,
                            I40IW_DEBUG_CM,
                            "Api - connect() FAILED: dest addr=%pI4",
                            cm_info.rem_addr);
        else
                i40iw_debug(&iwdev->sc_dev,
                            I40IW_DEBUG_CM,
                            "Api - connect() FAILED: dest addr=%pI6",
                            cm_info.rem_addr);

        i40iw_rem_ref_cm_node(cm_node);
        cm_id->rem_ref(cm_id);
        iwdev->cm_core.stats_connect_errs++;
        return ret;
}
/**
 * i40iw_create_listen - registered call creating listener
 * @cm_id: cm information for passive connection
 * @backlog: maximum number of pending accepts
 */
int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct i40iw_device *iwdev;
        struct i40iw_cm_listener *cm_listen_node;
        struct i40iw_cm_info cm_info;
        enum i40iw_status_code ret;
        struct sockaddr_in *laddr;
        struct sockaddr_in6 *laddr6;
        bool wildcard = false;

        iwdev = to_iwdev(cm_id->device);
        if (!iwdev)
                return -EINVAL;

        laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
        laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
        memset(&cm_info, 0, sizeof(cm_info));
        if (laddr->sin_family == AF_INET) {
                cm_info.ipv4 = true;
                cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
                cm_info.loc_port = ntohs(laddr->sin_port);

                if (laddr->sin_addr.s_addr != INADDR_ANY)
                        cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
                else
                        wildcard = true;

        } else {
                cm_info.ipv4 = false;
                i40iw_copy_ip_ntohl(cm_info.loc_addr,
                                    laddr6->sin6_addr.in6_u.u6_addr32);
                cm_info.loc_port = ntohs(laddr6->sin6_port);
                if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
                        i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
                                               &cm_info.vlan_id);
                else
                        wildcard = true;
        }
        cm_info.backlog = backlog;
        cm_info.cm_id = cm_id;

        cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
        if (!cm_listen_node) {
                i40iw_pr_err("cm_listen_node == NULL\n");
                return -ENOMEM;
        }

        cm_id->provider_data = cm_listen_node;

        cm_listen_node->tos = cm_id->tos;
        cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
        cm_info.user_pri = cm_listen_node->user_pri;
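        /*
         * A reused listener already owns its qhash and APBVT resources.
         * Otherwise, a wildcard listen needs a qhash entry per local
         * interface address (the add_mqh helpers walk them), while a
         * listen on a specific address needs a single entry; both cases
         * also register the port with APBVT.
         */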
        if (!cm_listen_node->reused_node) {
                if (wildcard) {
                        if (cm_info.ipv4)
                                ret = i40iw_add_mqh_4(iwdev,
                                                      &cm_info,
                                                      cm_listen_node);
                        else
                                ret = i40iw_add_mqh_6(iwdev,
                                                      &cm_info,
                                                      cm_listen_node);
                        if (ret)
                                goto error;

                        ret = i40iw_manage_apbvt(iwdev,
                                                 cm_info.loc_port,
                                                 I40IW_MANAGE_APBVT_ADD);

                        if (ret)
                                goto error;
                } else {
                        ret = i40iw_manage_qhash(iwdev,
                                                 &cm_info,
                                                 I40IW_QHASH_TYPE_TCP_SYN,
                                                 I40IW_QHASH_MANAGE_TYPE_ADD,
                                                 NULL,
                                                 true);
                        if (ret)
                                goto error;
                        cm_listen_node->qhash_set = true;
                        ret = i40iw_manage_apbvt(iwdev,
                                                 cm_info.loc_port,
                                                 I40IW_MANAGE_APBVT_ADD);
                        if (ret)
                                goto error;
                }
        }
        cm_id->add_ref(cm_id);
        cm_listen_node->cm_core->stats_listen_created++;
        return 0;

error:
        i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
        return -EINVAL;
}
/**
 * i40iw_destroy_listen - registered call to destroy listener
 * @cm_id: cm information for passive connection
 */
int i40iw_destroy_listen(struct iw_cm_id *cm_id)
{
        struct i40iw_device *iwdev;

        iwdev = to_iwdev(cm_id->device);
        if (cm_id->provider_data)
                i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
        else
                i40iw_pr_err("cm_id->provider_data was NULL\n");

        cm_id->rem_ref(cm_id);

        return 0;
}
/**
 * i40iw_cm_event_connected - handle connected active node
 * @event: the info for cm_node of connection
 */
static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
{
        struct i40iw_qp *iwqp;
        struct i40iw_device *iwdev;
        struct i40iw_cm_core *cm_core;
        struct i40iw_cm_node *cm_node;
        struct i40iw_sc_dev *dev;
        struct ib_qp_attr attr;
        struct iw_cm_id *cm_id;
        unsigned long flags;
        int status;
        bool read0;

        cm_node = event->cm_node;
        cm_id = cm_node->cm_id;
        iwqp = (struct i40iw_qp *)cm_id->provider_data;
        iwdev = to_iwdev(iwqp->ibqp.device);
        dev = &iwdev->sc_dev;
        cm_core = &iwdev->cm_core;

        if (iwqp->destroyed) {
                status = -ETIMEDOUT;
                goto error;
        }
        i40iw_cm_init_tsa_conn(iwqp, cm_node);
        read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
        if (iwqp->page)
                iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
        dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
        if (iwqp->page)
                kunmap(iwqp->page);

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTS;
        cm_node->qhash_set = false;
        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

        cm_node->accelerated = true;
        spin_lock_irqsave(&cm_core->ht_lock, flags);
        list_move_tail(&cm_node->list, &cm_core->accelerated_list);
        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
        status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
                                     0);
        if (status)
                i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");

        return;

error:
        iwqp->cm_id = NULL;
        cm_id->provider_data = NULL;
        i40iw_send_cm_event(event->cm_node,
                            cm_id,
                            IW_CM_EVENT_CONNECT_REPLY,
                            status);
        cm_id->rem_ref(cm_id);
        i40iw_rem_ref_cm_node(event->cm_node);
}
/**
 * i40iw_cm_event_reset - handle reset
 * @event: the info for cm_node of connection
 */
static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
{
        struct i40iw_cm_node *cm_node = event->cm_node;
        struct iw_cm_id *cm_id = cm_node->cm_id;
        struct i40iw_qp *iwqp;

        if (!cm_id)
                return;

        iwqp = cm_id->provider_data;
        if (!iwqp)
                return;

        i40iw_debug(cm_node->dev,
                    I40IW_DEBUG_CM,
                    "reset event %p - cm_id = %p\n",
                    event->cm_node, cm_id);
        iwqp->cm_id = NULL;

        i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
        i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
}
/**
 * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
 * @work: pointer to cm event info
 */
static void i40iw_cm_event_handler(struct work_struct *work)
{
        struct i40iw_cm_event *event = container_of(work,
                                                    struct i40iw_cm_event,
                                                    event_work);
        struct i40iw_cm_node *cm_node;

        if (!event || !event->cm_node || !event->cm_node->cm_core)
                return;

        cm_node = event->cm_node;

        switch (event->type) {
        case I40IW_CM_EVENT_MPA_REQ:
                i40iw_send_cm_event(cm_node,
                                    cm_node->cm_id,
                                    IW_CM_EVENT_CONNECT_REQUEST,
                                    0);
                break;
        case I40IW_CM_EVENT_RESET:
                i40iw_cm_event_reset(event);
                break;
        case I40IW_CM_EVENT_CONNECTED:
                if (!event->cm_node->cm_id ||
                    (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
                        break;
                i40iw_cm_event_connected(event);
                break;
        case I40IW_CM_EVENT_MPA_REJECT:
                if (!event->cm_node->cm_id ||
                    (cm_node->state == I40IW_CM_STATE_OFFLOADED))
                        break;
                i40iw_send_cm_event(cm_node,
                                    cm_node->cm_id,
                                    IW_CM_EVENT_CONNECT_REPLY,
                                    -ECONNREFUSED);
                break;
        case I40IW_CM_EVENT_ABORTED:
                if (!event->cm_node->cm_id ||
                    (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
                        break;
                i40iw_event_connect_error(event);
                break;
        default:
                i40iw_pr_err("event type = %d\n", event->type);
                break;
        }

        event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
        i40iw_rem_ref_cm_node(event->cm_node);
        kfree(event);
}
/**
 * i40iw_cm_post_event - queue event request for worker thread
 * @event: cm node's info for up event call
 */
static void i40iw_cm_post_event(struct i40iw_cm_event *event)
{
        /* hold node and cm_id references until the work item has run */
        atomic_inc(&event->cm_node->ref_count);
        event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
        INIT_WORK(&event->event_work, i40iw_cm_event_handler);

        queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}
/**
 * i40iw_qhash_ctrl - enable/disable qhash for list
 * @iwdev: device pointer
 * @parent_listen_node: parent listen node
 * @nfo: cm info node
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 *
 * Enables or disables the qhash for the node in the child
 * listen list that matches ipaddr. If no matching IP was found
 * it will allocate and add a new child listen node to the
 * parent listen node. The listen_list_lock is assumed to be
 * held when called.
 */
static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
                             struct i40iw_cm_listener *parent_listen_node,
                             struct i40iw_cm_info *nfo,
                             u32 *ipaddr, bool ipv4, bool ifup)
{
        struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
        struct i40iw_cm_listener *child_listen_node;
        struct list_head *pos, *tpos;
        enum i40iw_status_code ret;
        bool node_allocated = false;
        enum i40iw_quad_hash_manage_type op =
                ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

        list_for_each_safe(pos, tpos, child_listen_list) {
                child_listen_node =
                        list_entry(pos,
                                   struct i40iw_cm_listener,
                                   child_listen_list);
                if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
                        goto set_qhash;
        }

        /* if not found then add a child listener if interface is going up */
        if (!ifup)
                return;
        child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
        if (!child_listen_node)
                return;
        node_allocated = true;
        memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
        memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);

set_qhash:
        memcpy(nfo->loc_addr,
               child_listen_node->loc_addr,
               sizeof(nfo->loc_addr));
        nfo->vlan_id = child_listen_node->vlan_id;
        ret = i40iw_manage_qhash(iwdev, nfo,
                                 I40IW_QHASH_TYPE_TCP_SYN,
                                 op,
                                 NULL, false);
        if (!ret) {
                child_listen_node->qhash_set = ifup;
                if (node_allocated)
                        list_add(&child_listen_node->child_listen_list,
                                 &parent_listen_node->child_listen_list);
        } else if (node_allocated) {
                kfree(child_listen_node);
        }
}
/**
 * i40iw_cm_teardown_connections - teardown QPs
 * @iwdev: device pointer
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @nfo: cm info used to match the connections
 * @disconnect_all: flag indicating disconnect all QPs
 *
 * teardown QPs where source or destination addr matches ip addr
 */
void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
                                   struct i40iw_cm_info *nfo,
                                   bool disconnect_all)
{
        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
        struct list_head *list_core_temp;
        struct list_head *list_node;
        struct i40iw_cm_node *cm_node;
        unsigned long flags;
        struct list_head teardown_list;
        struct ib_qp_attr attr;
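        /*
         * Teardown happens in two phases: matching nodes are collected
         * from both hash lists under ht_lock (taking a reference on
         * each), then the lock is dropped before the QPs are moved to
         * error and disconnected.
         */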
        INIT_LIST_HEAD(&teardown_list);
        spin_lock_irqsave(&cm_core->ht_lock, flags);
        list_for_each_safe(list_node, list_core_temp,
                           &cm_core->accelerated_list) {
                cm_node = container_of(list_node, struct i40iw_cm_node, list);
                if (disconnect_all ||
                    (nfo->vlan_id == cm_node->vlan_id &&
                    (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
                     !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
                        atomic_inc(&cm_node->ref_count);
                        list_add(&cm_node->teardown_entry, &teardown_list);
                }
        }
        list_for_each_safe(list_node, list_core_temp,
                           &cm_core->non_accelerated_list) {
                cm_node = container_of(list_node, struct i40iw_cm_node, list);
                if (disconnect_all ||
                    (nfo->vlan_id == cm_node->vlan_id &&
                    (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
                     !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
                        atomic_inc(&cm_node->ref_count);
                        list_add(&cm_node->teardown_entry, &teardown_list);
                }
        }
        spin_unlock_irqrestore(&cm_core->ht_lock, flags);

        list_for_each_safe(list_node, list_core_temp, &teardown_list) {
                cm_node = container_of(list_node, struct i40iw_cm_node,
                                       teardown_entry);
                attr.qp_state = IB_QPS_ERR;
                i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
                if (iwdev->reset)
                        i40iw_cm_disconn(cm_node->iwqp);
                i40iw_rem_ref_cm_node(cm_node);
        }
}
/**
 * i40iw_if_notify - process an ifdown or ifup on an interface
 * @iwdev: device pointer
 * @netdev: network device structure
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 */
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
                     u32 *ipaddr, bool ipv4, bool ifup)
{
        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
        unsigned long flags;
        struct i40iw_cm_listener *listen_node;
        static const u32 ip_zero[4] = { 0, 0, 0, 0 };
        struct i40iw_cm_info nfo;
        u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
        enum i40iw_status_code ret;
        enum i40iw_quad_hash_manage_type op =
                ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

        nfo.vlan_id = vlan_id;
        nfo.ipv4 = ipv4;

        /* Disable or enable qhash for listeners */
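        /*
         * A listener matches if it is bound to this exact address or to
         * the wildcard (all-zero) address on the same VLAN; wildcard
         * listeners with per-address children are handled through
         * i40iw_qhash_ctrl().
         */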
        spin_lock_irqsave(&cm_core->listen_list_lock, flags);
        list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
                if (vlan_id == listen_node->vlan_id &&
                    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
                     !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
                        memcpy(nfo.loc_addr, listen_node->loc_addr,
                               sizeof(nfo.loc_addr));
                        nfo.loc_port = listen_node->loc_port;
                        nfo.user_pri = listen_node->user_pri;
                        if (!list_empty(&listen_node->child_listen_list)) {
                                i40iw_qhash_ctrl(iwdev,
                                                 listen_node,
                                                 &nfo,
                                                 ipaddr, ipv4, ifup);
                        } else if (memcmp(listen_node->loc_addr, ip_zero,
                                          ipv4 ? 4 : 16)) {
                                ret = i40iw_manage_qhash(iwdev,
                                                         &nfo,
                                                         I40IW_QHASH_TYPE_TCP_SYN,
                                                         op,
                                                         NULL,
                                                         false);
                                if (!ret)
                                        listen_node->qhash_set = ifup;
                        }
                }
        }
        spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

        /* teardown connected qp's on ifdown */
        if (!ifup)
                i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
}