/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	struct rocker_fdb_tbl_key {
		u32 pport;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
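
/* With a base of 0x0f00 and 255 internal VLANs, the internal range is
 * 0x0f00..0x0ffe inclusive (255 values); rocker_vlan_id_is_internal()
 * below checks exactly this range.
 */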

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
{
	return !!rocker_port->bridge_dev;
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(gfp_t gfp)
{
	struct rocker_wait *wait;

	wait = kmalloc(sizeof(*wait), gfp);
	if (!wait)
		return NULL;
	rocker_wait_init(wait);

	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;

	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
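
/* Usage pattern for the wait helpers above, as a sketch: the caller does
 * rocker_wait_reset(), kicks the hardware, then blocks in
 * rocker_wait_event_timeout(); the matching interrupt handler completes
 * the handshake by calling rocker_wait_wake_up() (see
 * rocker_test_irq_handler() and rocker_cmd_irq_handler() below).
 */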

static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
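
/* Example expansion: rocker_write32(rocker, TEST_REG, val) pastes the
 * register token, becoming writel(val, rocker->hw_addr + ROCKER_TEST_REG).
 */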

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
			       u32 test_type, dma_addr_t dma_handle,
			       unsigned char *buf, unsigned char *expect,
			       size_t size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}
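
/* The eight iterations above shift the buffer start by 0..7 bytes,
 * presumably to exercise DMA at every alignment within an 8-byte window.
 */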

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |            Header           | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}
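
/* Worked example for the size helpers above, assuming ROCKER_TLV_HDRLEN
 * is 8 (the header rounded up to one ROCKER_TLV_ALIGNTO unit): a u32
 * attribute has rocker_tlv_attr_size(4) == 12, which
 * rocker_tlv_total_size() aligns up to 16, leaving
 * rocker_tlv_padlen(4) == 4 bytes of trailing padding; tlv->len stores
 * the unpadded 12.
 */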

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = (struct rocker_tlv *) tlv;
	}
}

static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
				  struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   struct rocker_tlv *start)
{
	desc_info->tlv_size = (char *) start - desc_info->data;
}
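
/* Typical construction of a nested attribute with the helpers above
 * (a sketch; MY_NEST_TYPE and MY_ATTR are placeholder attribute types):
 *
 *	struct rocker_tlv *nest;
 *
 *	nest = rocker_tlv_nest_start(desc_info, MY_NEST_TYPE);
 *	if (!nest)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u32(desc_info, MY_ATTR, val))
 *		return -EMSGSIZE;
 *	rocker_tlv_nest_end(desc_info, nest);
 *
 * rocker_tlv_nest_end() back-patches the nest header's len to cover all
 * attributes added since rocker_tlv_nest_start().
 */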

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(struct rocker *rocker,
					struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
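
/* Example: a requested size of 100 is rounded up to the power of two
 * 128, and the result is clamped into
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX].
 */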

static int rocker_dma_ring_create(struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(struct rocker *rocker,
				    struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
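
/* Note: a ring whose head would catch up with its tail is treated as
 * full (see rocker_desc_head_get()), so at most size - 1 descriptors
 * can be outstanding; the loop above therefore publishes size - 1
 * descriptors and the last one is committed without advancing head.
 */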

static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
				      struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(struct rocker *rocker,
				      struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}

	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
				      struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
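
/* Example: with the default MTU of 1500 this is 1500 + 14 (Ethernet
 * header) + 4 (FCS) + 4 (VLAN tag) = 1522 bytes.
 */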
  968. static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
  969. struct rocker_port *rocker_port,
  970. struct rocker_desc_info *desc_info)
  971. {
  972. struct net_device *dev = rocker_port->dev;
  973. struct sk_buff *skb;
  974. size_t buf_len = rocker_port_rx_buf_len(rocker_port);
  975. int err;
  976. /* Ensure that hw will see tlv_size zero in case of an error.
  977. * That tells hw to use another descriptor.
  978. */
  979. rocker_desc_cookie_ptr_set(desc_info, NULL);
  980. desc_info->tlv_size = 0;
  981. skb = netdev_alloc_skb_ip_align(dev, buf_len);
  982. if (!skb)
  983. return -ENOMEM;
  984. err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
  985. skb, buf_len);
  986. if (err) {
  987. dev_kfree_skb_any(skb);
  988. return err;
  989. }
  990. rocker_desc_cookie_ptr_set(desc_info, skb);
  991. return 0;
  992. }
  993. static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
  994. struct rocker_tlv **attrs)
  995. {
  996. struct pci_dev *pdev = rocker->pdev;
  997. dma_addr_t dma_handle;
  998. size_t len;
  999. if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
  1000. !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
  1001. return;
  1002. dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
  1003. len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
  1004. pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
  1005. }
  1006. static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
  1007. struct rocker_desc_info *desc_info)
  1008. {
  1009. struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
  1010. struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
  1011. if (!skb)
  1012. return;
  1013. rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
  1014. rocker_dma_rx_ring_skb_unmap(rocker, attrs);
  1015. dev_kfree_skb_any(skb);
  1016. }
  1017. static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
  1018. struct rocker_port *rocker_port)
  1019. {
  1020. struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
  1021. int i;
  1022. int err;
  1023. for (i = 0; i < rx_ring->size; i++) {
  1024. err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
  1025. &rx_ring->desc_info[i]);
  1026. if (err)
  1027. goto rollback;
  1028. }
  1029. return 0;
  1030. rollback:
  1031. for (i--; i >= 0; i--)
  1032. rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
  1033. return err;
  1034. }
  1035. static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
  1036. struct rocker_port *rocker_port)
  1037. {
  1038. struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
  1039. int i;
  1040. for (i = 0; i < rx_ring->size; i++)
  1041. rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
  1042. }
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/
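
/* Completion handler for the command ring.  For fire-and-forget
 * (nowait) commands nobody is sleeping on the wait object, so it is
 * destroyed here; otherwise the waiter in rocker_cmd_exec() is woken.
 * Processed descriptors are returned to the device as credits.
 */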
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}
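
/* Operation flags passed down through the flow/group/FDB helpers:
 * REMOVE deletes instead of adds, NOWAIT posts the command without
 * sleeping on completion (and forces atomic allocations), LEARNED
 * marks an FDB entry as hardware-learned, and REFRESH refreshes an
 * existing entry rather than creating a new one.
 */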
#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
}

static int rocker_event_process(struct rocker *rocker,
				struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;
	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}
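
/* Event descriptors carry one-way notifications from the device, so
 * each one is processed, its gen bit is cleared, and the descriptor is
 * immediately posted back to the event ring head for reuse.
 */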
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/
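
/* rocker_cmd_exec() is the single entry point for sending a command
 * descriptor to the device.  The prepare callback fills the descriptor
 * with TLVs, and the optional process callback parses the device's
 * response into process_priv.  With nowait set, the call returns as
 * soon as the descriptor is posted and the cmd IRQ handler cleans up.
 */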
typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv);

static int rocker_cmd_exec(struct rocker *rocker,
			   struct rocker_port *rocker_port,
			   rocker_cmd_cb_t prepare, void *prepare_priv,
			   rocker_cmd_cb_t process, void *process_priv,
			   bool nowait)
{
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	unsigned long flags;
	int err;

	wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
		err = -EAGAIN;
		goto out;
	}
	err = prepare(rocker, rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
		goto out;
	}
	rocker_desc_cookie_ptr_set(desc_info, wait);
	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
	spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker, rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(wait);
	return err;
}

static int
rocker_cmd_get_port_settings_prep(struct rocker *rocker,
				  struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

static int
rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(struct rocker *rocker,
				  struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      !!(rocker_port->brport_flags & BR_LEARNING)))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
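
/* Public wrappers: each pairs a prepare callback (and, for gets, a
 * process callback) with rocker_cmd_exec() to run one synchronous
 * port-settings command.
 */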
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd, false);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr, false);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL, false);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL, false);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_set_port_learning_prep,
			       NULL, NULL, NULL, false);
}
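
/* Per-table TLV builders for OF-DPA flow add/mod commands.  Each one
 * emits only the key and action fields that its flow table defines.
 */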
static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
					   struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;
	return 0;
}

static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
					struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;
	return 0;
}

static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
					    struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;
	return 0;
}

static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;
	return 0;
}

static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
					  struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;
	return 0;
}

static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
				       struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
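
/* Build a flow add/mod command: common fields first (table id,
 * priority, hard time, cookie), then dispatch on the table id for the
 * table-specific TLVs.
 */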
static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
				   struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
				   struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				      struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;
	return 0;
}

static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;
	return 0;
}

static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   struct rocker_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;
	return 0;
}

static int rocker_cmd_group_tbl_add(struct rocker *rocker,
				    struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_group_tbl_del(struct rocker *rocker,
				    struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
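
/* The driver shadows device state in hash tables so adds can be
 * turned into mods, entries can be reference-counted, and everything
 * can be flushed on teardown.  Each table has its own spinlock.
 */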
static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}

static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}

static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
{
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
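
/* Adding a flow that already exists in the shadow table becomes an
 * OF-DPA FLOW_MOD reusing the original cookie; otherwise a new cookie
 * is assigned and a FLOW_ADD is sent.
 */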
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct rocker_flow_tbl_entry *match,
			       bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	return rocker_cmd_exec(rocker, rocker_port,
			       rocker_cmd_flow_tbl_add,
			       found, NULL, NULL, nowait);
}

static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct rocker_flow_tbl_entry *match,
			       bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	kfree(match);

	if (found) {
		err = rocker_cmd_exec(rocker, rocker_port,
				      rocker_cmd_flow_tbl_del,
				      found, NULL, NULL, nowait);
		kfree(found);
	}

	return err;
}

static gfp_t rocker_op_flags_gfp(int flags)
{
	return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
}

static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
			      int flags, struct rocker_flow_tbl_entry *entry)
{
	bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;

	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_flow_tbl_del(rocker_port, entry, nowait);
	else
		return rocker_flow_tbl_add(rocker_port, entry, nowait);
}

static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   int flags, u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
{
	struct rocker_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return rocker_flow_tbl_do(rocker_port, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
				int flags, u32 in_pport,
				__be16 vlan_id, __be16 vlan_id_mask,
				enum rocker_of_dpa_table_id goto_tbl,
				bool untagged, __be16 new_vlan_id)
{
	struct rocker_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;
	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return rocker_flow_tbl_do(rocker_port, flags, entry);
}

static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
				    u32 in_pport, u32 in_pport_mask,
				    __be16 eth_type, const u8 *eth_dst,
				    const u8 *eth_dst_mask, __be16 vlan_id,
				    __be16 vlan_id_mask, bool copy_to_cpu,
				    int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, flags, entry);
}

static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
				  int flags,
				  const u8 *eth_dst, const u8 *eth_dst_mask,
				  __be16 vlan_id, u32 tunnel_id,
				  enum rocker_of_dpa_table_id goto_tbl,
				  u32 group_id, bool copy_to_cpu)
{
	struct rocker_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
			wild = true;
	}

	priority = ROCKER_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, flags, entry);
}

static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
					  __be16 eth_type, __be32 dst,
					  __be32 dst_mask, u32 priority,
					  enum rocker_of_dpa_table_id goto_tbl,
					  u32 group_id, int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct rocker_flow_tbl_key,
				  ucast_routing.group_id);

	return rocker_flow_tbl_do(rocker_port, flags, entry);
}

static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
			       int flags, u32 in_pport,
			       u32 in_pport_mask,
			       const u8 *eth_src, const u8 *eth_src_mask,
			       const u8 *eth_dst, const u8 *eth_dst_mask,
			       __be16 eth_type,
			       __be16 vlan_id, __be16 vlan_id_mask,
			       u8 ip_proto, u8 ip_proto_mask,
			       u8 ip_tos, u8 ip_tos_mask,
			       u32 group_id)
{
	u32 priority;
	struct rocker_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	priority = ROCKER_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
			priority = ROCKER_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = ROCKER_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return rocker_flow_tbl_do(rocker_port, flags, entry);
}
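
/* Group table management mirrors the flow table: find by group_id,
 * add becomes GROUP_MOD when the entry already exists, and delete
 * frees both the shadow entry and any fan-out group_ids array.
 */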
static struct rocker_group_tbl_entry *
rocker_group_tbl_find(struct rocker *rocker,
		      struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		kfree(entry->group_ids);
		break;
	default:
		break;
	}
	kfree(entry);
}

static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct rocker_group_tbl_entry *match,
				bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		hash_del(&found->entry);
		rocker_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	return rocker_cmd_exec(rocker, rocker_port,
			       rocker_cmd_group_tbl_add,
			       found, NULL, NULL, nowait);
}

static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				struct rocker_group_tbl_entry *match,
				bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	rocker_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(rocker, rocker_port,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL, nowait);
		rocker_group_tbl_entry_free(found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       int flags, struct rocker_group_tbl_entry *entry)
{
	bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;

	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, entry, nowait);
	else
		return rocker_group_tbl_add(rocker_port, entry, nowait);
}

static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     int flags, __be16 vlan_id,
				     u32 out_pport, int pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   int flags, u8 group_count,
				   u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = kcalloc(group_count, sizeof(u32),
				   rocker_op_flags_gfp(flags));
	if (!entry->group_ids) {
		kfree(entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 int flags, __be16 vlan_id,
				 u8 group_count, u32 *group_ids,
				 u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, flags,
				       group_count, group_ids,
				       group_id);
}

static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   int flags, u32 index, u8 *src_mac,
				   u8 *dst_mac, __be16 vlan_id,
				   bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, flags, entry);
}
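
/* Neigh table helpers; the caller must hold rocker->neigh_tbl_lock. */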
static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

static void _rocker_neigh_add(struct rocker *rocker,
			      struct rocker_neigh_tbl_entry *entry)
{
	entry->index = rocker->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(struct rocker *rocker,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		kfree(entry);
	}
}

static void _rocker_neigh_update(struct rocker *rocker,
				 struct rocker_neigh_tbl_entry *entry,
				 u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else {
		entry->ref_count++;
	}
}
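
/* Install, update, or remove the L3 unicast group and /32 route for a
 * resolved IPv4 neighbor, keeping the shadow neigh table in sync.
 */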
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  int flags, __be32 ip_addr, u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(rocker, found);
	} else if (updating) {
		_rocker_neigh_update(rocker, found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);
		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		kfree(entry);

	return err;
}

static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n)
		n = neigh_create(&arp_tbl, &ip_addr, dev);
	/* neigh_create() reports failure as an ERR_PTR, never NULL */
	if (IS_ERR(n))
		return PTR_ERR(n);

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */
	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	/* drop the reference taken by the lookup/create above */
	neigh_release(n);
	return err;
}
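
/* Resolve an IPv4 nexthop to an L3 unicast group index, creating a
 * shadow neigh entry (and kicking off ARP) if the address is not yet
 * resolved.  Entries are reference-counted per nexthop user.
 */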
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(rocker, found);
	} else if (updating) {
		_rocker_neigh_update(rocker, found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, ip_addr);

	return err;
}
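
/* Group IDs are 32-bit values.  As a sketch of the encoding assumed
 * here (per the ROCKER_GROUP_* macros in rocker.h: group type in the
 * top nibble, VLAN in bits 27..16, port or index in the low 16 bits),
 * the L2 interface group for VLAN 10 on pport 2 would encode as
 * 0x000a0002, and the flood group below keeps index 0 in the low bits.
 */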
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 group_ids[ROCKER_FP_PORTS_MAX];
	u8 group_count = 0;
	int err;
	int i;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		return 0;

	err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
				    group_count, group_ids,
				    group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

	return err;
}

static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      int flags, __be16 vlan_id,
				      bool pop_vlan)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, flags,
						vlan_id, out_pport,
						pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, flags,
					vlan_id, out_pport,
					pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
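
/* Table of the control traffic classes the driver traps.  Each entry
 * names a destination MAC (plus mask) and/or ethertype and flags which
 * flow table should catch it: 'acl' entries go through the ACL policy
 * table, 'term' entries through the termination-MAC table, and
 * 'bridge' entries through the bridging table's per-VLAN flood group.
 */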
static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     int flags, struct rocker_ctrl *ctrl,
				     __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	u8 *eth_src = NULL;
	u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					int flags, struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      int flags, struct rocker_ctrl *ctrl,
				      __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
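
/* Dispatch one ctrl entry for one VLAN to the helper that programs the
 * flow table selected by the entry's acl/bridge/term flag.
 */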
static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
				 struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, flags,
						 ctrl, vlan_id);
	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
						    ctrl, vlan_id);
	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     int flags, __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
			    struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
			    u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_and_set_bit(ntohs(internal_vlan_id),
				       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
						rocker_port->vlan_bitmap))
		return 0; /* already removed */

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			return err;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		return err;
	}

	err = rocker_port_vlan_flood_group(rocker_port, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		return err;
	}

	err = rocker_flow_tbl_vlan(rocker_port, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

	return err;
}

static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}

struct rocker_fdb_learn_work {
	struct work_struct work;
	struct net_device *dev;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};
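
/* FDB learn events can arrive in atomic (IRQ) context, so notifying the
 * bridge through the netdev switch notifier chain is deferred to
 * process context via this work item.
 */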
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct netdev_switch_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	if (learned && removing)
		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
					     lw->dev, &info.info);
	else if (learned && !removing)
		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
					     lw->dev, &info.info);

	kfree(work);
}

static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 int flags, const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
					     vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->dev = rocker_port->dev;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	schedule_work(&lw->work);

	return 0;
}
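
/* The FDB lives in a hashtable keyed by a CRC32 of the whole
 * (pport, addr, vlan_id) key; hash collisions are resolved by
 * comparing the full key with memcmp().
 */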
static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->key.pport = rocker_port->pport;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (removing && found) {
		kfree(fdb);
		hash_del(&found->entry);
	} else if (!removing && !found) {
		hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
}

static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.pport != rocker_port->pport)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
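
/* Program termination-MAC entries so frames addressed to the port's own
 * MAC are handed to the routing tables: one entry per ethertype (IPv4
 * and IPv6) for the given VLAN.
 */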
static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  int flags, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int flags = ROCKER_OP_FLAG_NOWAIT;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, flags,
						vlan_id, out_pport,
						pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
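
/* Reconcile the installed ctrl traps with the new STP state: DISABLED
 * traps nothing, LISTENING/BLOCKING traps only link-local multicast,
 * and LEARNING/FORWARDING additionally traps IPv4/IPv6 multicast plus
 * either default bridging (bridged port) or local ARP (routed port).
 * Any delta is programmed below, then the FDB and forwarding state are
 * brought in line.
 */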
static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	int flags;
	int err;
	int i;

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			flags = ROCKER_OP_FLAG_NOWAIT |
				(want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, flags,
					       &rocker_ctrls[i]);
			if (err)
				return err;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port);
	if (err)
		return err;

	return rocker_port_fwding(rocker_port);
}

static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
}

static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
					     int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
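
/* Offload one IPv4 route.  If the single nexthop has a gateway on this
 * port, the route points at the nexthop's L3 unicast group; otherwise
 * the route is programmed to send matching packets to the CPU via the
 * L2 interface group for the port's internal VLAN (pport 0).
 */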
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
				int dst_len, struct fib_info *fi, u32 tb_id,
				int flags)
{
	struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_port_fwd_disable(rocker_port);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}

static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info)
{
	struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}
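
/* TX descriptor TLV layout, as built below:
 *
 *   ROCKER_TLV_TX_FRAGS (nest)
 *     ROCKER_TLV_TX_FRAG (nest, one per linear head and per skb frag)
 *       ROCKER_TLV_TX_FRAG_ATTR_ADDR (u64 DMA address)
 *       ROCKER_TLV_TX_FRAG_ATTR_LEN (u16 mapped length)
 *
 * On any failure the nest is cancelled, the mapped frags are unwound
 * and the skb is dropped rather than requeued.
 */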
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
		goto nest_cancel;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
				       __be16 proto, u16 vid)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_vlan(rocker_port, 0, vid);
	if (err)
		return err;

	return rocker_port_router_mac(rocker_port, 0, htons(vid));
}

static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
					__be16 proto, u16 vid)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
				     htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			       struct net_device *dev,
			       const unsigned char *addr, u16 vid,
			       u16 nlm_flags)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
}

static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			       struct net_device *dev,
			       const unsigned char *addr, u16 vid)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
	int flags = ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
}

static int rocker_fdb_fill_info(struct sk_buff *skb,
				struct rocker_port *rocker_port,
				const unsigned char *addr, u16 vid,
				u32 portid, u32 seq, int type,
				unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = rocker_port->dev->ifindex;
	ndm->ndm_state = NUD_REACHABLE;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
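
/* Netlink dump of this port's FDB entries.  cb->args[0] carries the
 * index where the previous dump call stopped; entries below it are
 * skipped so a multi-part dump resumes in the right place.
 */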
static int rocker_port_fdb_dump(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct net_device *dev,
				struct net_device *filter_dev,
				int idx)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	int bkt;
	unsigned long lock_flags;
	const unsigned char *addr;
	u16 vid;
	int err;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.pport != rocker_port->pport)
			continue;
		if (idx < cb->args[0])
			goto skip;
		addr = found->key.addr;
		vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
		err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq,
					   RTM_NEWNEIGH, NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		++idx;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
	return idx;
}

static int rocker_port_bridge_setlink(struct net_device *dev,
				      struct nlmsghdr *nlh, u16 flags)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct nlattr *protinfo;
	struct nlattr *attr;
	int err;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
		if (attr) {
			if (nla_len(attr) < sizeof(u8))
				return -EINVAL;

			if (nla_get_u8(attr))
				rocker_port->brport_flags |= BR_LEARNING;
			else
				rocker_port->brport_flags &= ~BR_LEARNING;
			err = rocker_port_set_learning(rocker_port);
			if (err)
				return err;
		}
		attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
		if (attr) {
			if (nla_len(attr) < sizeof(u8))
				return -EINVAL;

			if (nla_get_u8(attr))
				rocker_port->brport_flags |= BR_LEARNING_SYNC;
			else
				rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
		}
	}

	return 0;
}

static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				      struct net_device *dev,
				      u32 filter_mask)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       rocker_port->brport_flags, mask);
}

static int rocker_port_switch_parent_id_get(struct net_device *dev,
					    struct netdev_phys_item_id *psid)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;

	psid->id_len = sizeof(rocker->hw.id);
	memcpy(&psid->id, &rocker->hw.id, psid->id_len);
	return 0;
}

static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_port_stp_update(rocker_port, state);
}

static int rocker_port_switch_fib_ipv4_add(struct net_device *dev,
					   __be32 dst, int dst_len,
					   struct fib_info *fi,
					   u8 tos, u8 type,
					   u32 nlflags, u32 tb_id)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = 0;

	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
				    fi, tb_id, flags);
}

static int rocker_port_switch_fib_ipv4_del(struct net_device *dev,
					   __be32 dst, int dst_len,
					   struct fib_info *fi,
					   u8 tos, u8 type, u32 tb_id)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = ROCKER_OP_FLAG_REMOVE;

	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
				    fi, tb_id, flags);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open = rocker_port_open,
	.ndo_stop = rocker_port_stop,
	.ndo_start_xmit = rocker_port_xmit,
	.ndo_set_mac_address = rocker_port_set_mac_address,
	.ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
	.ndo_fdb_add = rocker_port_fdb_add,
	.ndo_fdb_del = rocker_port_fdb_del,
	.ndo_fdb_dump = rocker_port_fdb_dump,
	.ndo_bridge_setlink = rocker_port_bridge_setlink,
	.ndo_bridge_getlink = rocker_port_bridge_getlink,
	.ndo_switch_parent_id_get = rocker_port_switch_parent_id_get,
	.ndo_switch_port_stp_update = rocker_port_switch_port_stp_update,
	.ndo_switch_fib_ipv4_add = rocker_port_switch_fib_ipv4_add,
	.ndo_switch_fib_ipv4_del = rocker_port_switch_fib_ipv4_del,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
	{ "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
	{ "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
};

#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int
rocker_cmd_get_port_stats_prep(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
				       struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       void *priv)
{
	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv, false);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings = rocker_port_get_settings,
	.set_settings = rocker_port_set_settings,
	.get_drvinfo = rocker_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = rocker_port_get_strings,
	.get_ethtool_stats = rocker_port_get_stats,
	.get_sset_count = rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}

static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}

static int rocker_port_rx_proc(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}
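
/* RX NAPI poll: each completed descriptor costs one credit against the
 * NAPI budget, and the credits are handed back to the device at the end
 * so it can reuse the ring slots.  Polling stops (napi_complete) only
 * when the ring drains before the budget is spent.
 */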
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
		unregister_netdev(rocker_port->dev);
	}
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker *rocker,
				      struct rocker_port *rocker_port)
{
	struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;

	rocker_port_dev_addr_init(rocker, rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL |
			 NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_SWITCH_OFFLOAD;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	rocker_port_set_learning(rocker_port);

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
	err = rocker_port_ig_tbl(rocker_port, 0);
	if (err) {
		dev_err(&pdev->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	return 0;

err_port_ig_tbl:
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}
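
/* PCI probe: enable the device, map BAR0, bring up MSI-X, run the basic
 * hardware test, reset the switch, then set up DMA rings, the cmd/event
 * IRQs, the driver tables and finally the ports.  The error labels
 * below unwind these steps in exact reverse order.
 */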
  3922. static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  3923. {
  3924. struct rocker *rocker;
  3925. int err;
  3926. rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
  3927. if (!rocker)
  3928. return -ENOMEM;
  3929. err = pci_enable_device(pdev);
  3930. if (err) {
  3931. dev_err(&pdev->dev, "pci_enable_device failed\n");
  3932. goto err_pci_enable_device;
  3933. }
  3934. err = pci_request_regions(pdev, rocker_driver_name);
  3935. if (err) {
  3936. dev_err(&pdev->dev, "pci_request_regions failed\n");
  3937. goto err_pci_request_regions;
  3938. }
  3939. err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  3940. if (!err) {
  3941. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  3942. if (err) {
  3943. dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
  3944. goto err_pci_set_dma_mask;
  3945. }
  3946. } else {
  3947. err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  3948. if (err) {
  3949. dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
  3950. goto err_pci_set_dma_mask;
  3951. }
  3952. }
  3953. if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
  3954. dev_err(&pdev->dev, "invalid PCI region size\n");
  3955. goto err_pci_resource_len_check;
  3956. }
  3957. rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
  3958. pci_resource_len(pdev, 0));
  3959. if (!rocker->hw_addr) {
  3960. dev_err(&pdev->dev, "ioremap failed\n");
  3961. err = -EIO;
  3962. goto err_ioremap;
  3963. }
  3964. pci_set_master(pdev);
  3965. rocker->pdev = pdev;
  3966. pci_set_drvdata(pdev, rocker);
  3967. rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
  3968. err = rocker_msix_init(rocker);
  3969. if (err) {
  3970. dev_err(&pdev->dev, "MSI-X init failed\n");
  3971. goto err_msix_init;
  3972. }
  3973. err = rocker_basic_hw_test(rocker);
  3974. if (err) {
  3975. dev_err(&pdev->dev, "basic hw test failed\n");
  3976. goto err_basic_hw_test;
  3977. }
  3978. rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
  3979. err = rocker_dma_rings_init(rocker);
  3980. if (err)
  3981. goto err_dma_rings_init;
  3982. err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
  3983. rocker_cmd_irq_handler, 0,
  3984. rocker_driver_name, rocker);
  3985. if (err) {
  3986. dev_err(&pdev->dev, "cannot assign cmd irq\n");
  3987. goto err_request_cmd_irq;
  3988. }
  3989. err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
  3990. rocker_event_irq_handler, 0,
  3991. rocker_driver_name, rocker);
  3992. if (err) {
  3993. dev_err(&pdev->dev, "cannot assign event irq\n");
  3994. goto err_request_event_irq;
  3995. }
  3996. rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
  3997. err = rocker_init_tbls(rocker);
  3998. if (err) {
  3999. dev_err(&pdev->dev, "cannot init rocker tables\n");
  4000. goto err_init_tbls;
  4001. }
  4002. err = rocker_probe_ports(rocker);
  4003. if (err) {
  4004. dev_err(&pdev->dev, "failed to probe ports\n");
  4005. goto err_probe_ports;
  4006. }
  4007. dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
  4008. return 0;
  4009. err_probe_ports:
  4010. rocker_free_tbls(rocker);
  4011. err_init_tbls:
  4012. free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
  4013. err_request_event_irq:
  4014. free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
  4015. err_request_cmd_irq:
  4016. rocker_dma_rings_fini(rocker);
  4017. err_dma_rings_init:
  4018. err_basic_hw_test:
  4019. rocker_msix_fini(rocker);
  4020. err_msix_init:
  4021. iounmap(rocker->hw_addr);
  4022. err_ioremap:
  4023. err_pci_resource_len_check:
  4024. err_pci_set_dma_mask:
  4025. pci_release_regions(pdev);
  4026. err_pci_request_regions:
  4027. pci_disable_device(pdev);
  4028. err_pci_enable_device:
  4029. kfree(rocker);
  4030. return err;
  4031. }
  4032. static void rocker_remove(struct pci_dev *pdev)
  4033. {
  4034. struct rocker *rocker = pci_get_drvdata(pdev);
  4035. rocker_free_tbls(rocker);
  4036. rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
  4037. rocker_remove_ports(rocker);
  4038. free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
  4039. free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
  4040. rocker_dma_rings_fini(rocker);
  4041. rocker_msix_fini(rocker);
  4042. iounmap(rocker->hw_addr);
  4043. pci_release_regions(rocker->pdev);
  4044. pci_disable_device(rocker->pdev);
  4045. kfree(rocker);
  4046. }

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}
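
/* Joining a bridge re-keys the port's untagged traffic from the port's
 * own internal VLAN ID to the bridge's, so every port enslaved to the
 * same bridge ends up sharing the bridge's internal VLAN.
 */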
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	int err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);

	rocker_port->bridge_dev = bridge;

	/* Use bridge internal VLAN ID for untagged pkts */
	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
	if (err)
		return err;
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 bridge->ifindex);
	return rocker_port_vlan(rocker_port, 0, 0);
}
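
/* Leaving the bridge is the mirror image: drop the bridge's internal
 * VLAN ID, fall back to the port's own, and re-enable forwarding if
 * the netdev is still up.
 */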
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	int err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);

	rocker_port->bridge_dev = NULL;

	/* Use port internal VLAN ID for untagged pkts */
	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
	if (err)
		return err;
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);
	err = rocker_port_vlan(rocker_port, 0, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port);

	return err;
}
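
/* NETDEV_CHANGEUPPER means a port gained or lost a master.  Only
 * bridge masters, identified by the "bridge" rtnl_link_ops kind, are
 * meaningful here; any other change of master (including losing it
 * entirely) is treated as leaving the bridge.
 */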
static int rocker_port_master_changed(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct net_device *master = netdev_master_upper_dev_get(dev);
	int err = 0;

	if (master && master->rtnl_link_ops &&
	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
		err = rocker_port_bridge_join(rocker_port, master);
	else
		err = rocker_port_bridge_leave(rocker_port);

	return err;
}
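
/* The software bridge change has already happened by the time this
 * notifier runs, so a failure to mirror it into the switch hardware
 * can only be logged; NOTIFY_DONE is returned either way.
 */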
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		dev = netdev_notifier_info_to_dev(ptr);
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_port_master_changed(dev);
		if (err)
			netdev_warn(dev,
				    "failed to reflect master change (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/
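
/* Mirror a neighbour update into the switch: an entry in a NUD_VALID
 * state installs or refreshes the IPv4 neighbour in hardware, any
 * other state removes it via ROCKER_OP_FLAG_REMOVE.
 */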
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
}
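
/* Only IPv4 ARP-table neighbour updates on rocker ports are of
 * interest; updates for other neighbour tables or foreign netdevs are
 * ignored.
 */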
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/
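
/* The notifiers go in before the PCI driver is registered, so they are
 * already in place when rocker_probe() creates the port netdevs and no
 * early bridge or neighbour event can be missed.
 */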
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);