qib_iba7322.c
/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
				  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
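/*
 * Example (illustrative): BMASK(7, 4) expands to ((1 << 4) - 1) << 4,
 * i.e. 0xf0, a contiguous mask covering bits 7..4 inclusive.
 */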
/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation,
		 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */

/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
		  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QME7342)

#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
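/*
 * Illustrative note: these convert the machine-generated byte offsets
 * from qib_7322_regs.h into u64 array indices, e.g. KREG_IDX(Scratch)
 * is QIB_7322_Scratch_OFFS / sizeof(u64), suitable for the kreg
 * read/write helpers used throughout this file.
 */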
#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64)	\
	QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)	\
	QIB_7322_##regname##_##fldname##_RMASK <<	\
	QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)	\
	(((value) >> SYM_LSB(regname, fldname)) &	\
	 SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
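/*
 * Illustrative examples: MASK_ACROSS(0, 7) is 0xff (the 64-bit analogue
 * of BMASK, with the argument order reversed), and
 * SYM_FIELD(v, IBCStatusA_0, LinkSpeedActive) shifts v right by the
 * field's generated _LSB value and masks with its _RMASK, yielding just
 * that field's contents.
 */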
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */

#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
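/*
 * Illustrative note (assuming the usual one-bit-per-speed encoding of
 * QIB_IB_SDR/DDR/QDR in qib.h): PORT_SPD_CAP occupies the low three
 * bits, and DUAL_PORT_CAP repeats the same three speed bits shifted up
 * by PORT_SPD_CAP_SHIFT for the second port.
 */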
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */
/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
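/*
 * Note (interpretation): the ur_ prefix marks a user-register index;
 * it is computed relative to RcvHdrTail0 here because the flow table
 * sits in the same per-context user register page.
 */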
/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS (  \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
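/* W1C = write-1-to-clear: writing a 1 back to a set error bit clears
 * it, so writing TIDFLOW_ERRBITS to a flow table entry clears both
 * mismatch indications at once. */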
/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};
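/*
 * Illustrative note: the delay values above scale inversely with link
 * rate, roughly ceil(40 / rate_in_Gbps): 2.5 Gbps -> 16, 10 Gbps -> 4,
 * 40 Gbps -> 1.
 */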

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED 0x00
#define IB_7322_LT_STATE_LINKUP 0x01
#define IB_7322_LT_STATE_POLLACTIVE 0x02
#define IB_7322_LT_STATE_POLLQUIET 0x03
#define IB_7322_LT_STATE_SLEEPDELAY 0x04
#define IB_7322_LT_STATE_SLEEPQUIET 0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
#define IB_7322_LT_STATE_CFGIDLE 0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
#define IB_7322_LT_STATE_TXREVLANES 0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
#define IB_7322_LT_STATE_CFGWAITENH 0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
#define IB_7322_L_STATE_INIT 0x1
#define IB_7322_L_STATE_ARM 0x2
#define IB_7322_L_STATE_ACTIVE 0x3
#define IB_7322_L_STATE_ACT_DEFER 0x4

static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
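
/*
 * Illustrative sketch (not part of the driver): the table above is
 * indexed by the LinkTrainingState field of IBCStatusA.  The real
 * lookup is done by qib_7322_phys_portstate() later in this file,
 * which amounts to:
 */
static inline u8 example_lt_to_physportstate(u64 ibcst)
{
	u8 ltstate = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

	return qib_7322_physportstate[ltstate];
}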

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
	int rcv;
	void *arg;
	struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 main_int_mask; /* clear bits which have dedicated handlers */
	u64 int_enable_mask; /* for per port interrupts in single port mode */
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh; /* current AvailUpdThld */
	u32 updthresh_dflt; /* default AvailUpdThld */
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];
	int sdma_cpu[2];
	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx emphasis settings, in "human readable" form. */
struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};
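
/*
 * Illustrative example (made-up values, not validated settings): a
 * txdds_ent bundles the four Tx equalization knobs -- amplitude and
 * pre-, main-, and post-cursor emphasis -- for one link speed.  A QDR
 * entry could be initialized like so:
 */
static const struct txdds_ent example_qdr_txdds __maybe_unused =
	{ .amp = 0, .pre = 1, .main = 15, .post = 11 };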

struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired; the pairs are indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;
	/*
	 * These 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors.  They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;
	/*
	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	 * Entry zero is unused, to simplify indexing.
	 */
	u8 h1_val;
	u8 no_eep; /* txselect table index to use if no qsfp info */
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192]; /* for port error interrupt msg buffer */
	char sdmamsgbuf[192]; /* for per-port sdma error messages */
};
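
/*
 * Illustrative sketch (not part of the driver) of the snapshot/delta
 * scheme described in the structure above: a snapshot (*snap) is taken
 * while a transition is in progress (ibdeltainprog set); when the
 * transition completes, the counter movement is folded into the delta
 * (*delta), which is later subtracted back out of the chip counter at
 * driver unload.  "cur_symbol_errs" stands in for the real counter read.
 */
static inline void example_fold_symbol_delta(struct qib_chippport_specific *cp,
					     u64 cur_symbol_errs)
{
	if (cp->ibdeltainprog) {
		cp->ibsymdelta += cur_symbol_errs - cp->ibsymsnap;
		cp->ibdeltainprog = 0;
	}
}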

static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port; /* 0 if not port-specific, else port # */
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
		SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
	{ " (sdma 0)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
	{ " (sdmaI 1)", sdma_idle_intr,
		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
	{ " (sdmaP 0)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};
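
/*
 * Illustrative sketch (not part of the driver) of how the table above
 * is meant to be consumed when the MSI-X vectors are requested: the
 * name suffix is appended to the device name, lsb picks the IntStatus
 * bit redirected to the vector (-1 for the catch-all first entry),
 * port maps the vector to a pportdata, and dca flags vectors that take
 * part in DCA affinity.  "example_request" is a hypothetical stand-in
 * for the real request_irq plumbing.
 */
static inline void example_walk_irq_table(
	void (*example_request)(const char *suffix, irq_handler_t handler,
				int lsb, int port))
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(irq_table); i++)
		example_request(irq_table[i].name, irq_table[i].handler,
				irq_table[i].lsb, irq_table[i].port);
}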

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
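
/*
 * Illustrative sketch (not part of the driver): each map entry above
 * names the shadow word (shadow_inx) and DCACtrl register (regno) a
 * receive-header-queue DCA field lives in, the field's lsb, and a mask
 * that clears it.  Programming a CPU tag is a read-modify-write of the
 * shadow; the caller then writes the shadow word back to rmp->regno.
 */
static inline u64 example_rhdrq_dca_shadow(struct qib_chip_specific *cspec,
					   int ctxt, u64 tag)
{
	const struct dca_reg_map *rmp = &dca_rcvhdr_reg_map[ctxt];

	cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
	cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= tag << rmp->lsb;
	return cspec->dca_rcvhdr_ctrl[rmp->shadow_inx];
}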
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 until OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not present or mapped (not distinguishable
 * from valid contents at runtime; we may add a separate error variable
 * at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not present or mapped (not distinguishable
 * from valid contents at runtime; we may add a separate error variable
 * at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *)dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *)dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}
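
/*
 * Illustrative sketch (not part of the driver) of the per-context
 * address math shared by the three accessors above: each context's
 * user registers start ureg_align bytes after the previous context's,
 * based either at userbase (when user registers are mapped separately)
 * or at offset uregbase within the kernel register space.
 */
static inline u64 __iomem *example_ureg_base(const struct qib_devdata *dd,
					     int ctxt)
{
	char __iomem *base = dd->userbase ?
		(char __iomem *)dd->userbase :
		(char __iomem *)dd->kregbase + dd->uregbase;

	return (u64 __iomem *)(base + dd->ureg_align * ctxt);
}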

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *)&dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/*
 * Not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
				       const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)

#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	 QIB_I_SPIOSENT | \
	 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per-chip (rather than per-port) errors.  Most either do nothing but
 * trigger a print (because they self-recover, or always occur in tandem
 * with other errors that handle the issue), or indicate a failure with
 * no recovery; either way, we want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/*
 * SDMA chip errors (not per port).
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are
 * per-packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * The below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed.
 */
#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

/*
 * These are errors that can occur when the link changes state while a
 * packet is being sent or received.  This doesn't cover things like EBP
 * or VCRC errors that result from the *sender's* link changing state,
 * in which case we receive a "known bad" packet.  All of these are
 * "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise, neuter E_SPKT_ERRS_IGNORE: no send packet errors are ignored */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled).  It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0, .sz = 0 }
};

#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(RcvEgrFullErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	{ .mask = 0, .sz = 0 }
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	/* SDmaHaltErr is not really an error, so make that clearer */
	{ .mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
	  .sz = 11 },
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0, .sz = 0 }
};

/*
 * Below generates "auto-message" for interrupts not specific to any port
 * or context.
 */
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended.
 */
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
	INTR_AUTO_P(SDmaInt),
	INTR_AUTO_P(SDmaProgressInt),
	INTR_AUTO_P(SDmaIdleInt),
	INTR_AUTO_P(SDmaCleanupDone),
	INTR_AUTO_C(RcvUrg),
	INTR_AUTO_P(ErrInt),
	INTR_AUTO(ErrInt), /* non-port-specific errs */
	INTR_AUTO(AssertGPIOInt),
	INTR_AUTO_P(SendDoneInt),
	INTR_AUTO(SendBufAvailInt),
	INTR_AUTO_C(RcvAvail),
	{ .mask = 0, .sz = 0 }
};

#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
	  .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0, .sz = 0 }
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer so it can be re-used;
 * we don't need to force an update of pioavail here.
 */
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long sbuf[4];

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	any = 0;
	for (i = 0; i < regcnt; ++i) {
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}

/* No txe_recover yet, if ever */

/* No decode__errors yet */
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			lmask = (these & (these - 1)) ^ these;
			if (len) {
				if (n++) {
					/* separate the strings */
					*msg++ = ',';
					len--;
				}
				BUG_ON(!msp->sz);
				/* msp->sz counts the nul */
				took = min_t(size_t, msp->sz - (size_t)1, len);
				memcpy(msg, msp->msg, took);
				len -= took;
				msg += took;
				if (len)
					*msg = '\0';
			}
			errs &= ~lmask;
			if (len && multi) {
				/* More than one bit this mask */
				int idx = -1;

				while (lmask & msp->mask) {
					++idx;
					lmask >>= 1;
				}
				took = scnprintf(msg, len, "_%d", idx);
				len -= took;
				msg += took;
			}
		}
		++msp;
	}
	/* If some bits are left, show in hex. */
	if (len && errs)
		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long) errs);
}
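
/*
 * Illustrative use of err_decode() (hypothetical error value): given a
 * value with, say, RcvEgrFullErr and SendVLMismatchErr set, it fills the
 * buffer with "RcvEgrFullErr,SendVLMismatchErr"; multi-bit masks get a
 * "_<bit>" suffix, and any bits with no table entry are appended as
 * "MORE:<hex>".
 */
static inline void example_decode_dev_errs(char *buf, size_t len, u64 errs)
{
	err_decode(buf, len, errs, qib_7322error_msgs);
}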

/* only called if r1 set */
static void flush_fifo(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *piobuf;
	u32 bufn;
	u32 *hdr;
	u64 pbc;
	const unsigned hdrwords = 7;
	static struct qib_ib_header ibhdr = {
		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
		.lrh[1] = IB_LID_PERMISSIVE,
		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
		.lrh[3] = IB_LID_PERMISSIVE,
		.u.oth.bth[0] = cpu_to_be32(
			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
		.u.oth.bth[1] = cpu_to_be32(0),
		.u.oth.bth[2] = cpu_to_be32(0),
		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
	};

	/*
	 * Send a dummy VL15 packet to flush the launch FIFO.
	 * This will not actually be sent since the TxeBypassIbc bit is set.
	 */
	pbc = PBC_7322_VL15_SEND |
		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
		(hdrwords + SIZE_OF_CRC);
	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
	if (!piobuf)
		return;
	writeq(pbc, piobuf);
	hdr = (u32 *) &ibhdr;
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		qib_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
		qib_flush_wc();
	} else
		qib_pio_copy(piobuf + 2, hdr, hdrwords);
	qib_sendbuf_done(dd, bufn);
}
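
/*
 * Worked example (illustrative, not part of the driver) of the PBC
 * built in flush_fifo() above: bit 63 is the VL15 no-credit-check
 * flag, the port select sits at bit PBC_PORT_SEL_LSB + 32 = 58 of the
 * 64-bit PBC, and the low bits carry the packet length in dwords
 * (here, the header words plus the CRC word).
 */
static inline u64 example_vl15_pbc(u32 hw_pidx, u32 dwords)
{
	return PBC_7322_VL15_SEND |
		((u64)(hw_pidx & PBC_PORT_SEL_RMASK) <<
		 (PBC_PORT_SEL_LSB + 32)) |
		dwords;
}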

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

	spin_lock(&dd->sendctrl_lock);

	/* If we are draining everything, block sends first */
	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;

	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
		qib_write_kreg_port(ppd, krp_sendctrl,
				    ppd->p_sendctrl |
				    SYM_MASK(SendCtrl_0, SDmaCleanup));
	else
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock(&dd->sendctrl_lock);

	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
		flush_fifo(ppd);
}

static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Write SendDmaLenGen with the generation MSB clear, then set
	 * the MSB, to enable generation checking and load the internal
	 * generation counter.
	 */
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg_port(ppd, krp_senddmalengen,
			    ppd->sdma_descq_cnt |
			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg_port(ppd, krp_senddmatail, tail);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	/*
	 * Drain all FIFOs.
	 * The hardware doesn't require this but we do it so that verbs
	 * and user applications don't wait for link active to send stale
	 * data.
	 */
	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);

	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
	qib_7322_sdma_sendctrl(ppd,
		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}

#define DISABLES_SDMA ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;

	errs &= QIB_E_P_SDMAERRS;
	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
		   errs, qib_7322p_error_msgs);

	if (errs & QIB_E_P_SDMAUNEXPDATA)
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
			    ppd->port);

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	if (errs != QIB_E_P_SDMAHALT) {
		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
		qib_dev_porterr(dd, ppd->port,
			"SDMA %s 0x%016llx %s\n",
			qib_sdma_state_names[ppd->sdma_state.current_state],
			errs, ppd->cpspec->sdmamsgbuf);
		dump_sdma_7322_state(ppd);
	}

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e20_hw_started);
		break;

	case qib_sdma_state_s20_idle:
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e60_hw_halted);
		break;

	case qib_sdma_state_s99_running:
		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * Handle per-device errors (not per-port errors).
 */
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
	char *msg;
	u64 iserr = 0;
	u64 errs;
	u64 mask;
	int log_idx;

	qib_stats.sps_errints++;
	errs = qib_read_kreg64(dd, kr_errstatus);
	if (!errs) {
		qib_devinfo(dd->pcidev,
			"device error interrupt, but no error bits set!\n");
		goto done;
	}

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & QIB_E_HARDWARE) {
		*msg = '\0';
		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
	} else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QIB_E_SPKTERRS) {
		qib_disarm_7322_senderrbufs(dd->pport);
		qib_stats.sps_txerrs++;
	} else if (errs & QIB_E_INVALIDADDR)
		qib_stats.sps_txerrs++;
	else if (errs & QIB_E_ARMLAUNCH) {
		qib_stats.sps_txerrs++;
		qib_disarm_7322_senderrbufs(dd->pport);
	}
	qib_write_kreg(dd, kr_errclear, errs);

	/*
	 * The ones we mask off are handled specially below
	 * or above.  Also mask SDMADISABLED by default as it
	 * is too chatty.
	 */
	mask = QIB_E_HARDWARE;
	*msg = '\0';

	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
		   qib_7322error_msgs);

	/*
	 * Getting reset is a tragedy for all ports.  Mark the device
	 * _and_ the ports as "offline" in a way meaningful to each.
	 */
	if (errs & QIB_E_RESET) {
		int pidx;

		qib_dev_err(dd,
			"Got reset, requires re-init (unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED; /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_err(dd, "%s error\n", msg);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll.  We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}

done:
	return;
}

static void qib_error_tasklet(unsigned long data)
{
	struct qib_devdata *dd = (struct qib_devdata *)data;

	handle_7322_errors(dd);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

static void reenable_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
			  u8 ibclt)
{
	ppd->cpspec->chase_end = 0;

	if (!qib_chase)
		return;

	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
	add_timer(&ppd->cpspec->chase_timer);
}

static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	unsigned long tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link.  If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7322_LT_STATE_CFGRCVFCFG:
	case IB_7322_LT_STATE_CFGWAITRMT:
	case IB_7322_LT_STATE_TXREVLANES:
	case IB_7322_LT_STATE_CFGENH:
		tnow = jiffies;
		if (ppd->cpspec->chase_end &&
		    time_after(tnow, ppd->cpspec->chase_end))
			disable_chase(ppd, tnow, ibclt);
		else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;
	default:
		ppd->cpspec->chase_end = 0;
		break;
	}

	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
	     ibclt == IB_7322_LT_STATE_LINKUP) &&
	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
		force_h1(ppd);
		ppd->cpspec->qdr_reforce = 1;
		if (!ppd->dd->cspec->r1)
			serdes_7322_los_enable(ppd, 0);
	} else if (ppd->cpspec->qdr_reforce &&
		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
		   (ibclt == IB_7322_LT_STATE_CFGENH ||
		    ibclt == IB_7322_LT_STATE_CFGIDLE ||
		    ibclt == IB_7322_LT_STATE_LINKUP))
		force_h1(ppd);

	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
	    ppd->link_speed_enabled == QIB_IB_QDR &&
	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
	     ibclt == IB_7322_LT_STATE_CFGENH ||
	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
		adj_tx_serdes(ppd);

	if (ibclt != IB_7322_LT_STATE_LINKUP) {
		u8 ltstate = qib_7322_phys_portstate(ibcst);
		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
					  LinkTrainingState);

		if (!ppd->dd->cspec->r1 &&
		    pibclt == IB_7322_LT_STATE_LINKUP &&
		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
			/* If the link went down (but not into recovery),
			 * turn LOS back on */
			serdes_7322_los_enable(ppd, 1);

		if (!ppd->cpspec->qdr_dfe_on &&
		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
			ppd->cpspec->qdr_dfe_on = 1;
			ppd->cpspec->qdr_dfe_time = 0;
			/* On link down, reenable QDR adaptation */
			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_DOWN_R1 :
					    QDR_STATIC_ADAPT_DOWN);
			pr_info(
				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
				ppd->dd->unit, ppd->port, ibclt);
		}
	}
}

static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);

/*
 * This is per-pport error handling; it will likely get its own MSI-X
 * interrupt (one for each port, although just a single handler).
 */
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
{
	char *msg;
	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
	struct qib_devdata *dd = ppd->dd;

	/* do this as soon as possible */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask)
		check_7322_rxe_status(ppd);

	errs = qib_read_kreg_port(ppd, krp_errstatus);
	if (!errs)
		qib_devinfo(dd->pcidev,
			"Port%d error interrupt, but no error bits set!\n",
			ppd->port);
	if (!fmask)
		errs &= ~QIB_E_P_IBSTATUSCHANGED;
	if (!errs)
		goto done;

	msg = ppd->cpspec->epmsgbuf;
	*msg = '\0';

	if (errs & ~QIB_E_P_BITSEXTANT) {
		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
		if (!*msg)
			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
				 "no others");
		qib_dev_porterr(dd, ppd->port,
			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
			(errs & ~QIB_E_P_BITSEXTANT), msg);
		*msg = '\0';
	}

	if (errs & QIB_E_P_SHDR) {
		u64 symptom;

		/* determine cause, then write to clear */
		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
			   hdrchk_msgs);
		*msg = '\0';
		/* senderrbuf cleared in SPKTERRS below */
	}

	if (errs & QIB_E_P_SPKTERRS) {
		if ((errs & QIB_E_P_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time.  The IB logic then complains that the packet
			 * isn't valid.  We don't want to confuse people, so
			 * we just don't print them, except at debug level.
			 */
			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
				   (errs & QIB_E_P_LINK_PKTERRS),
				   qib_7322p_error_msgs);
			*msg = '\0';
			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		}
		qib_disarm_7322_senderrbufs(ppd);
	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug level.
		 */
		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
			   qib_7322p_error_msgs);
		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
		*msg = '\0';
	}

	qib_write_kreg_port(ppd, krp_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	if (errs & QIB_E_P_RPKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & QIB_E_P_SPKTERRS)
		qib_stats.sps_txerrs++;

	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);

	if (errs & QIB_E_P_SDMAERRS)
		sdma_7322_p_errors(ppd, errs);

	if (errs & QIB_E_P_IBSTATUSCHANGED) {
		u64 ibcs;
		u8 ltstate;

		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		ltstate = qib_7322_phys_portstate(ibcs);

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_serdes_issues(ppd, ibcs);
		if (!(ppd->cpspec->ibcctrl_a &
		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
			/*
			 * We got our interrupt, so init code should be
			 * happy and not try alternatives.  Now squelch
			 * other "chatter" from link-negotiation (pre Init)
			 */
			ppd->cpspec->ibcctrl_a |=
				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
			qib_write_kreg_port(ppd, krp_ibcctrl_a,
					    ppd->cpspec->ibcctrl_a);
		}

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
			IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
			SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
			QIB_IB_DDR : QIB_IB_SDR;

		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
		    IB_PHYSPORTSTATE_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		else
			/*
			 * Since going into a recovery state causes the link
			 * state to go down and since recovery is transitory,
			 * it is better if we "miss" ever seeing the link
			 * training state go into recovery (i.e., ignore this
			 * transition for link state special handling purposes)
			 * without updating lastibcstat.
			 */
			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
				qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);
done:
	return;
}

/* enable/disable chip from delivering interrupts */
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
		/* cause any pending enabled interrupts to be re-delivered */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		if (dd->cspec->num_msix_entries) {
			/* and same for MSIx */
			u64 val = qib_read_kreg64(dd, kr_intgranted);

			if (val)
				qib_write_kreg(dd, kr_intgranted, val);
		}
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}

/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7322_clear_freeze(struct qib_devdata *dd)
{
	int pidx;

	/* disable error interrupts, to avoid confusion */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	qib_7322_set_intr_state(dd, 0);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/*
	 * Force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	/* We need to purge per-port errs and reset mask, too */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (!dd->pport[pidx].link_speed_supported)
			continue;
		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
	}
	qib_7322_set_intr_state(dd, 1);
}

/* no error handling to speak of */
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Most hardware errors are catastrophic, but for right now we'll print
 * them and continue.  We reuse the message buffer of qib_handle_errors()
 * to avoid excessive stack usage.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except BIST fail */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any error bits we aren't ignoring are set,
			 * complain only once in case it's stuck or recurring
			 * (we can get here multiple times).  Force the link
			 * down, so the switch knows, and LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcpy(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (hwerrs &
	    (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
	     SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
		int pidx = 0;
		int err;
		unsigned long flags;
		struct qib_pportdata *ppd = dd->pport;

		for (; pidx < dd->num_pports; ++pidx, ppd++) {
			err = 0;
			if (pidx == 0 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
				err++;
			if (pidx == 1 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
				err++;
			if (err) {
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				dump_sdma_7322_state(ppd);
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * For /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}

/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
{
	int pidx;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);
	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
			 QIB_EXTS_MEMBIST_ENDTEST)))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
					    ~0ULL);
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
	} else
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}

/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL unit in the same registers is in 32 byte units!
 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
 * in krp_rxcreditvl15, rather than 10.
 */
#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
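
/*
 * Worked example (dual-port card): 64 KB / (64 B * 2 ports) = 512 credit
 * units per port.  In set_vls() below, VL15 gets (2 * 288 + 63) / 64 = 9
 * of them, and the remaining 503 are split evenly among the operational
 * data VLs, with any remainder folded into VL0.
 */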

static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 enough credits (9) for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	/* read back (values unused); presumably flushes the credit writes */
	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}

/*
 * The code that deals with actual SerDes is in serdes_7322_init().
 * Compared to the code for iba7220, it is minimal.
 */
static int serdes_7322_init(struct qib_pportdata *ppd);

/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	/* ensure previous Tx parameters are not still forced */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	if (qib_compat_ddr_negotiate) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
						crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
						crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 24 units (~3 usec: 24 * 128 ns = 3.072 us).
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above).  Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
							    krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
					    IBA7322_IBC_SPEED_DDR |
					    IBA7322_IBC_SPEED_SDR |
					    IBA7322_IBC_WIDTH_AUTONEG |
					    SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
					   IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt. */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
		val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}
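
/*
 * Counter-adjustment recap (summarizing the snapshot/delta fields used
 * above and in qib_7322_mini_quiet_serdes() below): bringup snapshots
 * ibsymbolerr and iblinkerrrecov while DDR negotiation may inflate them;
 * on unload, counts accrued since the snapshot are backed out and the
 * accumulated deltas are folded into the chip counters, so the adjustment
 * survives a driver reload.
 */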
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well.  Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				/* net effect: val = ibsymsnap */
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				/* net effect: val = iblnkerrsnap */
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}

/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * When @on is true, the exact LED combination is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate.  That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val;
	unsigned long flags;
	int yel, grn;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	/* Allow override of LED display for, e.g. Locating system in rack */
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
		grn = qib_7322_phys_portstate(val) ==
			IB_PHYSPORTSTATE_LINKUP;
		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
	} else {
		grn = 0;
		yel = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
	if (grn) {
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is 1/16 sec (66.6 ms = 16,650,000 periods) on,
		 * 3/16 sec (187.5 ms = 46,875,000 periods) off, with
		 * packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
	}
	if (yel)
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
}

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
{
	switch (event) {
	case DCA_PROVIDER_ADD:
		if (dd->flags & QIB_DCA_ENABLED)
			break;
		if (!dca_add_requester(&dd->pcidev->dev)) {
			qib_devinfo(dd->pcidev, "DCA enabled\n");
			dd->flags |= QIB_DCA_ENABLED;
			qib_setup_dca(dd);
		}
		break;
	case DCA_PROVIDER_REMOVE:
		if (dd->flags & QIB_DCA_ENABLED) {
			dca_remove_requester(&dd->pcidev->dev);
			dd->flags &= ~QIB_DCA_ENABLED;
			dd->cspec->dca_ctrl = 0;
			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
				dd->cspec->dca_ctrl);
		}
		break;
	}
	return 0;
}

static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_chip_specific *cspec = dd->cspec;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
		const struct dca_reg_map *rmp;

		cspec->rhdr_cpu[rcd->ctxt] = cpu;
		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
		qib_devinfo(dd->pcidev,
			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		qib_write_kreg(dd, rmp->regno,
			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}

static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	unsigned pidx = ppd->port - 1;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->sdma_cpu[pidx] != cpu) {
		cspec->sdma_cpu[pidx] = cpu;
		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
		cspec->dca_rcvhdr_ctrl[4] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
				(ppd->hw_pidx ?
					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
		qib_devinfo(dd->pcidev,
			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[4]);
		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
			       cspec->dca_rcvhdr_ctrl[4]);
		cspec->dca_ctrl |= ppd->hw_pidx ?
			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}

static void qib_setup_dca(struct qib_devdata *dd)
{
	struct qib_chip_specific *cspec = dd->cspec;
	int i;

	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
		cspec->rhdr_cpu[i] = -1;
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		cspec->sdma_cpu[i] = -1;
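	/*
	 * Seed each RcvHdrq DCA control field with a transfer count of 1;
	 * the per-CPU tags are ORed in later by qib_update_rhdrq_dca() and
	 * qib_update_sdma_dca() as the affinity notifiers fire.
	 */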
	cspec->dca_rcvhdr_ctrl[0] =
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[1] =
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[2] =
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[3] =
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
	cspec->dca_rcvhdr_ctrl[4] =
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
			       cspec->dca_rcvhdr_ctrl[i]);
	for (i = 0; i < cspec->num_msix_entries; i++)
		setup_dca_notifier(dd, &cspec->msix_entries[i]);
}

static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct qib_irq_notify *n =
		container_of(notify, struct qib_irq_notify, notify);
	int cpu = cpumask_first(mask);

	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;

		qib_update_rhdrq_dca(rcd, cpu);
	} else {
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;

		qib_update_sdma_dca(ppd, cpu);
	}
}

static void qib_irq_notifier_release(struct kref *ref)
{
	struct qib_irq_notify *n =
		container_of(ref, struct qib_irq_notify, notify.kref);
	struct qib_devdata *dd;

	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;

		dd = rcd->dd;
	} else {
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;

		dd = ppd->dd;
	}
	qib_devinfo(dd->pcidev,
		"release on HCA notify 0x%p n 0x%p\n", ref, n);
	kfree(n);
}
#endif

/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
{
	u64 intgranted;
	int n;

	dd->cspec->main_int_mask = ~0ULL;
	n = dd->cspec->num_msix_entries;
	if (n) {
		int i;

		dd->cspec->num_msix_entries = 0;
		for (i = 0; i < n; i++) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
#endif
			irq_set_affinity_hint(
				dd->cspec->msix_entries[i].msix.vector, NULL);
			free_cpumask_var(dd->cspec->msix_entries[i].mask);
			free_irq(dd->cspec->msix_entries[i].msix.vector,
				 dd->cspec->msix_entries[i].arg);
		}
		qib_nomsix(dd);
	}
	/* make sure no MSIx interrupts are left pending */
	intgranted = qib_read_kreg64(dd, kr_intgranted);
	if (intgranted)
		qib_write_kreg(dd, kr_intgranted, intgranted);
}

static void qib_7322_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_7322_nomsix(dd);
}

static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
	int i;

#ifdef CONFIG_INFINIBAND_QIB_DCA
	if (dd->flags & QIB_DCA_ENABLED) {
		dca_remove_requester(&dd->pcidev->dev);
		dd->flags &= ~QIB_DCA_ENABLED;
		dd->cspec->dca_ctrl = 0;
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
	}
#endif

	qib_7322_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->sendchkenable);
	kfree(dd->cspec->sendgrhchk);
	kfree(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	for (i = 0; i < dd->num_pports; i++) {
		unsigned long flags;
		u32 mask = QSFP_GPIO_MOD_PRS_N |
			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
		}
		if (dd->pport[i].ibport_data.smi_ah)
			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
	}
}

/* handle SDMA interrupts */
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
{
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));

	if (intr0)
		qib_sdma_intr(ppd0);
	if (intr1)
		qib_sdma_intr(ppd1);

	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
}

/*
 * Set or clear the Send buffer available interrupt enable bit.
 */
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint)
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then clear them.
 * keep mainline interrupt handler cache-friendly
 */
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
{
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}

/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
	u32 gpiostatus;
	int handled = 0;
	int pidx;

	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer.  To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best.  Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
	/*
	 * Check for QSFP MOD_PRS changes
	 * only works for single port if IB1 != pidx1
	 */
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
	     ++pidx) {
		struct qib_pportdata *ppd;
		struct qib_qsfp_data *qd;
		u32 mask;

		if (!dd->pport[pidx].link_speed_supported)
			continue;
		mask = QSFP_GPIO_MOD_PRS_N;
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			u64 pins;

			qd = &ppd->cpspec->qsfp_data;
			gpiostatus &= ~mask;
			pins = qib_read_kreg64(dd, kr_extstatus);
			pins >>= SYM_LSB(EXTStatus, GPIOIn);
			if (!(pins & mask)) {
				++handled;
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
		}
	}

	if (gpiostatus && !handled) {
		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
		u32 gpio_irq = mask & gpiostatus;

		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}
}

/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
	if (istat & ~QIB_I_BITSEXTANT)
		unknown_7322_ibits(dd, istat);
	if (istat & QIB_I_GPIO)
		unknown_7322_gpio_intr(dd);
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
}

/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.
 */
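/*
 * For example, with rcv_int_count = 16: a context that handled fewer than
 * 16 packets on this interrupt has its timeout halved (floor of 2), while
 * one that handled 16 or more has it doubled, capped at rcv_int_timeout;
 * quiet contexts get low latency, busy contexts get fewer interrupts.
 */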
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
{
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];

	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = min(timeout << 1, rcv_int_timeout);
	else
		return;

	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
}

/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(istat == ~0ULL)) {
		qib_bad_intrstatus(dd);
		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	istat &= dd->cspec->main_int_mask;
	if (unlikely(!istat)) {
		/* already handled, or shared and not us */
		ret = IRQ_NONE;
		goto bail;
	}

	this_cpu_inc(*dd->int_counter);

	/* handle "errors" of various kinds first, device ahead of port */
	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
			      INT_MASK_P(Err, 1))))
		unlikely_7322_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * know the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
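	/*
	 * Layout recap for the decode below: bit (QIB_I_RCVAVAIL_LSB + i)
	 * signals packets available and bit (QIB_I_RCVURG_LSB + i) an
	 * urgent packet for context i, so rmask starts at context 0's pair
	 * and is shifted up once per kernel context; whatever remains
	 * belongs to user contexts and is handed to qib_handle_urcv().
	 */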
	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
	if (ctxtrbits) {
		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
			(1ULL << QIB_I_RCVURG_LSB);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				if (dd->rcd[i])
					qib_kreceive(dd->rcd[i], NULL, &npkts);
			}
			rmask <<= 1;
		}
		if (ctxtrbits) {
			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
				(ctxtrbits >> QIB_I_RCVURG_LSB);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
		sdma_7322_intr(dd, istat);

	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}

/*
 * Dedicated receive packet available interrupt handler.
 */
static irqreturn_t qib_7322pintr(int irq, void *data)
{
	struct qib_ctxtdata *rcd = data;
	struct qib_devdata *dd = rcd->dd;
	u32 npkts;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);

	qib_kreceive(rcd, NULL, &npkts);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send buffer available interrupt handler.
 */
static irqreturn_t qib_7322bufavail(int irq, void *data)
{
	struct qib_devdata *dd = data;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
	if (dd->flags & QIB_INITTED)
		qib_ib_piobufavail(dd);
	else
		qib_wantpiobuf_7322_intr(dd, 0);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA interrupt handler.
 */
static irqreturn_t sdma_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA idle interrupt handler.
 */
static irqreturn_t sdma_idle_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA progress interrupt handler.
 */
static irqreturn_t sdma_progress_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_P(SDmaProgress, 1) :
		       INT_MASK_P(SDmaProgress, 0));
	qib_sdma_intr(ppd);

	return IRQ_HANDLED;
}

/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
{
	struct qib_pportdata *ppd = data;
	struct qib_devdata *dd = ppd->dd;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;

	this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
		       INT_MASK_PM(SDmaCleanupDone, 1) :
		       INT_MASK_PM(SDmaCleanupDone, 0));
	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);

	return IRQ_HANDLED;
}

#ifdef CONFIG_INFINIBAND_QIB_DCA

static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	if (!m->dca)
		return;
	qib_devinfo(dd->pcidev,
		"Disabling notifier on HCA %d irq %d\n",
		dd->unit,
		m->msix.vector);
	irq_set_affinity_notifier(
		m->msix.vector,
		NULL);
	m->notifier = NULL;
}

static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
{
	struct qib_irq_notify *n;

	if (!m->dca)
		return;
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (n) {
		int ret;

		m->notifier = n;
		n->notify.irq = m->msix.vector;
		n->notify.notify = qib_irq_notifier_notify;
		n->notify.release = qib_irq_notifier_release;
		n->arg = m->arg;
		n->rcv = m->rcv;
		qib_devinfo(dd->pcidev,
			"set notifier irq %d rcv %d notify %p\n",
			n->notify.irq, n->rcv, &n->notify);
		ret = irq_set_affinity_notifier(
				n->notify.irq,
				&n->notify);
		if (ret) {
			m->notifier = NULL;
			kfree(n);
		}
	}
}

#endif

/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
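/*
 * Vector redirection, in brief: each interrupt source has a fixed bit
 * position ("lsb") in IntStatus, and the IntRedirect registers hold one
 * small field per source naming the MSIx vector that should receive it.
 * Every source bound to a dedicated vector below is also cleared from
 * main_int_mask, so the general handler qib_7322intr() ignores it.
 */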
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
{
	int ret, i, msixnum;
	u64 redirect[6];
	u64 mask;
	const struct cpumask *local_mask;
	int firstcpu, secondcpu = 0, currrcvcpu = 0;

	if (!dd->num_pports)
		return;

	if (clearpend) {
		/*
		 * if not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
		qib_7322_set_intr_state(dd, 0);

		/* clear the reset error, init error/hwerror mask */
		qib_7322_init_hwerrors(dd);

		/* clear any interrupt bits that might be set */
		qib_write_kreg(dd, kr_intclear, ~0ULL);

		/* make sure no pending MSIx intr, and clear diag reg */
		qib_write_kreg(dd, kr_intgranted, ~0ULL);
		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
	}

	if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
try_intx:
		if (!dd->pcidev->irq) {
			qib_dev_err(dd,
				"irq is 0, BIOS error?  Interrupts won't work\n");
			goto bail;
		}
		ret = request_irq(dd->pcidev->irq, qib_7322intr,
				  IRQF_SHARED, QIB_DRV_NAME, dd);
		if (ret) {
			qib_dev_err(dd,
				"Couldn't setup INTx interrupt (irq=%d): %d\n",
				dd->pcidev->irq, ret);
			goto bail;
		}
		dd->cspec->irq = dd->pcidev->irq;
		dd->cspec->main_int_mask = ~0ULL;
		goto bail;
	}

	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof(redirect));
	mask = ~0ULL;
	msixnum = 0;
	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	firstcpu = cpumask_first(local_mask);
	if (firstcpu >= nr_cpu_ids ||
	    cpumask_weight(local_mask) == num_online_cpus()) {
		local_mask = topology_core_cpumask(0);
		firstcpu = cpumask_first(local_mask);
	}
	if (firstcpu < nr_cpu_ids) {
		secondcpu = cpumask_next(firstcpu, local_mask);
		if (secondcpu >= nr_cpu_ids)
			secondcpu = firstcpu;
		currrcvcpu = secondcpu;
	}
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
		irq_handler_t handler;
		void *arg;
		u64 val;
		int lsb, reg, sh;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		int dca = 0;
#endif

		dd->cspec->msix_entries[msixnum].
			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
			= '\0';
		if (i < ARRAY_SIZE(irq_table)) {
			if (irq_table[i].port) {
				/* skip if for a non-configured port */
				if (irq_table[i].port > dd->num_pports)
					continue;
				arg = dd->pport + irq_table[i].port - 1;
			} else
				arg = dd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = irq_table[i].dca;
#endif
			lsb = irq_table[i].lsb;
			handler = irq_table[i].handler;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d%s", dd->unit,
				irq_table[i].name);
		} else {
			unsigned ctxt;

			ctxt = i - ARRAY_SIZE(irq_table);
			/* per krcvq context receive interrupt */
			arg = dd->rcd[ctxt];
			if (!arg)
				continue;
			if (qib_krcvq01_no_msi && ctxt < 2)
				continue;
#ifdef CONFIG_INFINIBAND_QIB_DCA
			dca = 1;
#endif
			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
			handler = qib_7322pintr;
			snprintf(dd->cspec->msix_entries[msixnum].name,
				sizeof(dd->cspec->msix_entries[msixnum].name)
				 - 1,
				QIB_DRV_NAME "%d (kctx)", dd->unit);
		}
		ret = request_irq(
			dd->cspec->msix_entries[msixnum].msix.vector,
			handler, 0, dd->cspec->msix_entries[msixnum].name,
			arg);
		if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to setup here.
			 */
			qib_dev_err(dd,
				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
				msixnum,
				dd->cspec->msix_entries[msixnum].msix.vector,
				ret);
			qib_7322_nomsix(dd);
			goto try_intx;
		}
		dd->cspec->msix_entries[msixnum].arg = arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
		dd->cspec->msix_entries[msixnum].dca = dca;
		dd->cspec->msix_entries[msixnum].rcv =
			handler == qib_7322pintr;
#endif
		if (lsb >= 0) {
			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
				SYM_LSB(IntRedirect0, vec1);
			mask &= ~(1ULL << lsb);
			redirect[reg] |= ((u64) msixnum) << sh;
		}
		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (firstcpu < nr_cpu_ids &&
			zalloc_cpumask_var(
				&dd->cspec->msix_entries[msixnum].mask,
				GFP_KERNEL)) {
			if (handler == qib_7322pintr) {
				cpumask_set_cpu(currrcvcpu,
					dd->cspec->msix_entries[msixnum].mask);
				currrcvcpu = cpumask_next(currrcvcpu,
					local_mask);
				if (currrcvcpu >= nr_cpu_ids)
					currrcvcpu = secondcpu;
			} else {
				cpumask_set_cpu(firstcpu,
					dd->cspec->msix_entries[msixnum].mask);
			}
			irq_set_affinity_hint(
				dd->cspec->msix_entries[msixnum].msix.vector,
				dd->cspec->msix_entries[msixnum].mask);
		}
		msixnum++;
	}
	/* Initialize the vector mapping */
	for (i = 0; i < ARRAY_SIZE(redirect); i++)
		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
	dd->cspec->main_int_mask = mask;
	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
		(unsigned long)dd);
bail:;
}

/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * Info will be based on the board revision register
 */
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
	/* Will need enumeration of board-types here */
	char *n;
	u32 boardid, namelen;
	unsigned features = DUAL_PORT_CAP;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 0:
		n = "InfiniPath_QLE7342_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		features = PORT_SPD_CAP;
		break;
	case 2:
		n = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 3:
		n = "InfiniPath_QMI7342";
		break;
	case 4:
		n = "InfiniPath_Unsupported7342";
		qib_dev_err(dd, "Unsupported version of QMH7342\n");
		features = 0;
		break;
	case BOARD_QMH7342:
		n = "InfiniPath_QMH7342";
		features = 0x24;
		break;
	case BOARD_QME7342:
		n = "InfiniPath_QME7342";
		break;
	case 8:
		n = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case BOARD_QMH7360:
		n = "Intel IB QDR 1P FLR-QSFP Adptr";
		dd->flags |= QIB_HAS_QSFP;
		break;
	case 15:
		n = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		break;
	default:
		n = "InfiniPath_QLE73xy_UNKNOWN";
		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
		break;
	}
	dd->board_atten = 1; /* index into txdds_Xdr */

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));

	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
		qib_devinfo(dd->pcidev,
			"IB%u: Forced to single port mode by module parameter\n",
			dd->unit);
		features &= PORT_SPD_CAP;
	}

	return features;
}
  3269. /*
  3270. * This routine sleeps, so it can only be called from user context, not
  3271. * from interrupt context.
  3272. */
  3273. static int qib_do_7322_reset(struct qib_devdata *dd)
  3274. {
  3275. u64 val;
  3276. u64 *msix_vecsave;
  3277. int i, msix_entries, ret = 1;
  3278. u16 cmdval;
  3279. u8 int_line, clinesz;
  3280. unsigned long flags;
  3281. /* Use dev_err so it shows up in logs, etc. */
  3282. qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
  3283. qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
  3284. msix_entries = dd->cspec->num_msix_entries;
  3285. /* no interrupts till re-initted */
  3286. qib_7322_set_intr_state(dd, 0);
  3287. if (msix_entries) {
  3288. qib_7322_nomsix(dd);
  3289. /* can be up to 512 bytes, too big for stack */
  3290. msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
  3291. sizeof(u64), GFP_KERNEL);
  3292. if (!msix_vecsave)
  3293. qib_dev_err(dd, "No mem to save MSIx data\n");
  3294. } else
  3295. msix_vecsave = NULL;
  3296. /*
  3297. * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
  3298. * info that is set up by the BIOS, so we have to save and restore
  3299. * it ourselves. There is some risk something could change it,
  3300. * after we save it, but since we have disabled the MSIx, it
  3301. * shouldn't be touched...
  3302. */
  3303. for (i = 0; i < msix_entries; i++) {
  3304. u64 vecaddr, vecdata;
  3305. vecaddr = qib_read_kreg64(dd, 2 * i +
  3306. (QIB_7322_MsixTable_OFFS / sizeof(u64)));
  3307. vecdata = qib_read_kreg64(dd, 1 + 2 * i +
  3308. (QIB_7322_MsixTable_OFFS / sizeof(u64)));
  3309. if (msix_vecsave) {
  3310. msix_vecsave[2 * i] = vecaddr;
  3311. /* save it without the masked bit set */
  3312. msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
  3313. }
  3314. }
  3315. dd->pport->cpspec->ibdeltainprog = 0;
  3316. dd->pport->cpspec->ibsymdelta = 0;
  3317. dd->pport->cpspec->iblnkerrdelta = 0;
  3318. dd->pport->cpspec->ibmalfdelta = 0;
  3319. /* so we check interrupts work again */
  3320. dd->z_int_counter = qib_int_counter(dd);
  3321. /*
  3322. * Keep chip from being accessed until we are ready. Use
  3323. * writeq() directly, to allow the write even though QIB_PRESENT
  3324. * isn't set.
  3325. */
  3326. dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
  3327. dd->flags |= QIB_DOING_RESET;
  3328. val = dd->control | QLOGIC_IB_C_RESET;
  3329. writeq(val, &dd->kregbase[kr_control]);
  3330. for (i = 1; i <= 5; i++) {
  3331. /*
  3332. * Allow MBIST, etc. to complete; longer on each retry.
  3333. * We sometimes get machine checks from bus timeout if no
  3334. * response, so for now, make it *really* long.
  3335. */
  3336. msleep(1000 + (1 + i) * 3000);
  3337. qib_pcie_reenable(dd, cmdval, int_line, clinesz);
  3338. /*
  3339. * Use readq directly, so we don't need to mark it as PRESENT
  3340. * until we get a successful indication that all is well.
  3341. */
  3342. val = readq(&dd->kregbase[kr_revision]);
  3343. if (val == dd->revision)
  3344. break;
  3345. if (i == 5) {
  3346. qib_dev_err(dd,
  3347. "Failed to initialize after reset, unusable\n");
  3348. ret = 0;
  3349. goto bail;
  3350. }
  3351. }
  3352. dd->flags |= QIB_PRESENT; /* it's back */
  3353. if (msix_entries) {
  3354. /* restore the MSIx vector address and data if saved above */
  3355. for (i = 0; i < msix_entries; i++) {
  3356. dd->cspec->msix_entries[i].msix.entry = i;
  3357. if (!msix_vecsave || !msix_vecsave[2 * i])
  3358. continue;
  3359. qib_write_kreg(dd, 2 * i +
  3360. (QIB_7322_MsixTable_OFFS / sizeof(u64)),
  3361. msix_vecsave[2 * i]);
  3362. qib_write_kreg(dd, 1 + 2 * i +
  3363. (QIB_7322_MsixTable_OFFS / sizeof(u64)),
  3364. msix_vecsave[1 + 2 * i]);
  3365. }
  3366. }
  3367. /* initialize the remaining registers. */
  3368. for (i = 0; i < dd->num_pports; ++i)
  3369. write_7322_init_portregs(&dd->pport[i]);
  3370. write_7322_initregs(dd);
  3371. if (qib_pcie_params(dd, dd->lbus_width,
  3372. &dd->cspec->num_msix_entries,
  3373. dd->cspec->msix_entries))
  3374. qib_dev_err(dd,
  3375. "Reset failed to setup PCIe or interrupts; continuing anyway\n");
  3376. qib_setup_7322_interrupt(dd, 1);
  3377. for (i = 0; i < dd->num_pports; ++i) {
  3378. struct qib_pportdata *ppd = &dd->pport[i];
  3379. spin_lock_irqsave(&ppd->lflags_lock, flags);
  3380. ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
  3381. ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
  3382. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  3383. }
  3384. bail:
  3385. dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
  3386. kfree(msix_vecsave);
  3387. return ret;
  3388. }
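/*
 * Illustrative sketch (not driver code) of the MSI-X save/restore pattern
 * used in qib_do_7322_reset() above: each table slot is an (address, data)
 * qword pair; the per-vector mask bit (bit 32 of the data qword in this
 * layout) is stripped on save, and entries whose saved address is zero
 * (never set up) are skipped on restore.  read_qword()/write_qword() are
 * placeholders for the kreg accessors.
 *
 *	for (i = 0; i < nvec; i++) {			// save
 *		vecsave[2 * i] = read_qword(2 * i);
 *		vecsave[2 * i + 1] = read_qword(2 * i + 1) & ~0x100000000ULL;
 *	}
 *	// ... chip reset happens here ...
 *	for (i = 0; i < nvec; i++) {			// restore
 *		if (!vecsave[2 * i])
 *			continue;
 *		write_qword(2 * i, vecsave[2 * i]);
 *		write_qword(2 * i + 1, vecsave[2 * i + 1]);
 *	}
 */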
  3389. /**
  3390. * qib_7322_put_tid - write a TID to the chip
  3391. * @dd: the qlogic_ib device
  3392. * @tidptr: pointer to the expected TID (in chip) to update
3393. * @type: 0 for eager, 1 for expected
  3394. * @pa: physical address of in memory buffer; tidinvalid if freeing
  3395. */
  3396. static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
  3397. u32 type, unsigned long pa)
  3398. {
  3399. if (!(dd->flags & QIB_PRESENT))
  3400. return;
  3401. if (pa != dd->tidinvalid) {
  3402. u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
  3403. /* paranoia checks */
  3404. if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
  3405. qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
  3406. pa);
  3407. return;
  3408. }
  3409. if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
  3410. qib_dev_err(dd,
  3411. "Physical page address 0x%lx larger than supported\n",
  3412. pa);
  3413. return;
  3414. }
  3415. if (type == RCVHQ_RCV_TYPE_EAGER)
  3416. chippa |= dd->tidtemplate;
  3417. else /* for now, always full 4KB page */
  3418. chippa |= IBA7322_TID_SZ_4K;
  3419. pa = chippa;
  3420. }
  3421. writeq(pa, tidptr);
  3422. mmiowb();
  3423. }
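/*
 * Illustrative sketch (not driver code) of the "paranoia checks" above:
 * shifting the physical address right by IBA7322_TID_PA_SHIFT (2KB units)
 * and back left drops the low bits, so the round-trip only matches when
 * the address was 2KB aligned; the shifted value must also fit within the
 * TID's address field.  The shift values are hard-coded here for the
 * example only.
 *
 *	static int tid_pa_ok(unsigned long pa)
 *	{
 *		unsigned long chippa = pa >> 11;	// 2KB units
 *
 *		if (pa != (chippa << 11))
 *			return 0;			// not 2KB aligned
 *		if (chippa >= (1UL << 40))		// example field width
 *			return 0;			// out of range
 *		return 1;
 *	}
 */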
  3424. /**
  3425. * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
  3426. * @dd: the qlogic_ib device
3427. * @rcd: the ctxt data
  3428. *
  3429. * clear all TID entries for a ctxt, expected and eager.
  3430. * Used from qib_close().
  3431. */
  3432. static void qib_7322_clear_tids(struct qib_devdata *dd,
  3433. struct qib_ctxtdata *rcd)
  3434. {
  3435. u64 __iomem *tidbase;
  3436. unsigned long tidinv;
  3437. u32 ctxt;
  3438. int i;
  3439. if (!dd->kregbase || !rcd)
  3440. return;
  3441. ctxt = rcd->ctxt;
  3442. tidinv = dd->tidinvalid;
  3443. tidbase = (u64 __iomem *)
  3444. ((char __iomem *) dd->kregbase +
  3445. dd->rcvtidbase +
  3446. ctxt * dd->rcvtidcnt * sizeof(*tidbase));
  3447. for (i = 0; i < dd->rcvtidcnt; i++)
  3448. qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
  3449. tidinv);
  3450. tidbase = (u64 __iomem *)
  3451. ((char __iomem *) dd->kregbase +
  3452. dd->rcvegrbase +
  3453. rcd->rcvegr_tid_base * sizeof(*tidbase));
  3454. for (i = 0; i < rcd->rcvegrcnt; i++)
  3455. qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
  3456. tidinv);
  3457. }
  3458. /**
  3459. * qib_7322_tidtemplate - setup constants for TID updates
  3460. * @dd: the qlogic_ib device
  3461. *
  3462. * We setup stuff that we use a lot, to avoid calculating each time
  3463. */
  3464. static void qib_7322_tidtemplate(struct qib_devdata *dd)
  3465. {
  3466. /*
  3467. * For now, we always allocate 4KB buffers (at init) so we can
  3468. * receive max size packets. We may want a module parameter to
  3469. * specify 2KB or 4KB and/or make it per port instead of per device
  3470. * for those who want to reduce memory footprint. Note that the
  3471. * rcvhdrentsize size must be large enough to hold the largest
  3472. * IB header (currently 96 bytes) that we expect to handle (plus of
  3473. * course the 2 dwords of RHF).
  3474. */
  3475. if (dd->rcvegrbufsize == 2048)
  3476. dd->tidtemplate = IBA7322_TID_SZ_2K;
  3477. else if (dd->rcvegrbufsize == 4096)
  3478. dd->tidtemplate = IBA7322_TID_SZ_4K;
  3479. dd->tidinvalid = 0;
  3480. }
  3481. /**
3482. * qib_7322_get_base_info - set chip-specific flags for user code
3483. * @rcd: the qlogic_ib ctxt
3484. * @kinfo: qib_base_info pointer
3485. *
3486. * We set the PCIE flag because the lower bandwidth on PCIe vs
3487. * HyperTransport can affect some user packet algorithms.
  3488. */
  3489. static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
  3490. struct qib_base_info *kinfo)
  3491. {
  3492. kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
  3493. QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
  3494. QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
  3495. if (rcd->dd->cspec->r1)
  3496. kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
  3497. if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
  3498. kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
  3499. return 0;
  3500. }
  3501. static struct qib_message_header *
  3502. qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
  3503. {
  3504. u32 offset = qib_hdrget_offset(rhf_addr);
  3505. return (struct qib_message_header *)
  3506. (rhf_addr - dd->rhf_offset + offset);
  3507. }
  3508. /*
  3509. * Configure number of contexts.
  3510. */
  3511. static void qib_7322_config_ctxts(struct qib_devdata *dd)
  3512. {
  3513. unsigned long flags;
  3514. u32 nchipctxts;
  3515. nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
  3516. dd->cspec->numctxts = nchipctxts;
  3517. if (qib_n_krcv_queues > 1 && dd->num_pports) {
  3518. dd->first_user_ctxt = NUM_IB_PORTS +
  3519. (qib_n_krcv_queues - 1) * dd->num_pports;
  3520. if (dd->first_user_ctxt > nchipctxts)
  3521. dd->first_user_ctxt = nchipctxts;
  3522. dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
  3523. } else {
  3524. dd->first_user_ctxt = NUM_IB_PORTS;
  3525. dd->n_krcv_queues = 1;
  3526. }
  3527. if (!qib_cfgctxts) {
  3528. int nctxts = dd->first_user_ctxt + num_online_cpus();
  3529. if (nctxts <= 6)
  3530. dd->ctxtcnt = 6;
  3531. else if (nctxts <= 10)
  3532. dd->ctxtcnt = 10;
  3533. else if (nctxts <= nchipctxts)
  3534. dd->ctxtcnt = nchipctxts;
  3535. } else if (qib_cfgctxts < dd->num_pports)
  3536. dd->ctxtcnt = dd->num_pports;
  3537. else if (qib_cfgctxts <= nchipctxts)
  3538. dd->ctxtcnt = qib_cfgctxts;
  3539. if (!dd->ctxtcnt) /* none of the above, set to max */
  3540. dd->ctxtcnt = nchipctxts;
  3541. /*
  3542. * Chip can be configured for 6, 10, or 18 ctxts, and choice
  3543. * affects number of eager TIDs per ctxt (1K, 2K, 4K).
  3544. * Lock to be paranoid about later motion, etc.
  3545. */
  3546. spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
  3547. if (dd->ctxtcnt > 10)
  3548. dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
  3549. else if (dd->ctxtcnt > 6)
  3550. dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
  3551. /* else configure for default 6 receive ctxts */
  3552. /* The XRC opcode is 5. */
  3553. dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
  3554. /*
  3555. * RcvCtrl *must* be written here so that the
  3556. * chip understands how to change rcvegrcnt below.
  3557. */
  3558. qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
  3559. spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
  3560. /* kr_rcvegrcnt changes based on the number of contexts enabled */
  3561. dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
  3562. if (qib_rcvhdrcnt)
  3563. dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
  3564. else
  3565. dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
  3566. dd->num_pports > 1 ? 1024U : 2048U);
  3567. }
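/*
 * Illustrative sketch (not driver code) of the ContextCfg encoding used
 * above: the three supported receive-context configurations (6, 10 or 18)
 * map onto a 2-bit field, which in turn determines how many eager TIDs
 * each context gets (4K, 2K or 1K respectively).
 *
 *	static unsigned ctxtcfg_encode(unsigned ctxtcnt)
 *	{
 *		if (ctxtcnt > 10)
 *			return 2;	// 18 contexts
 *		if (ctxtcnt > 6)
 *			return 1;	// 10 contexts
 *		return 0;		// default 6 contexts
 *	}
 */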
  3568. static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
  3569. {
  3570. int lsb, ret = 0;
  3571. u64 maskr; /* right-justified mask */
  3572. switch (which) {
  3573. case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
  3574. ret = ppd->link_width_enabled;
  3575. goto done;
  3576. case QIB_IB_CFG_LWID: /* Get currently active Link-width */
  3577. ret = ppd->link_width_active;
  3578. goto done;
  3579. case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
  3580. ret = ppd->link_speed_enabled;
  3581. goto done;
  3582. case QIB_IB_CFG_SPD: /* Get current Link spd */
  3583. ret = ppd->link_speed_active;
  3584. goto done;
  3585. case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
  3586. lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3587. maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3588. break;
  3589. case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
  3590. lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3591. maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3592. break;
  3593. case QIB_IB_CFG_LINKLATENCY:
  3594. ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
  3595. SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
  3596. goto done;
  3597. case QIB_IB_CFG_OP_VLS:
  3598. ret = ppd->vls_operational;
  3599. goto done;
  3600. case QIB_IB_CFG_VL_HIGH_CAP:
  3601. ret = 16;
  3602. goto done;
  3603. case QIB_IB_CFG_VL_LOW_CAP:
  3604. ret = 16;
  3605. goto done;
  3606. case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  3607. ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3608. OverrunThreshold);
  3609. goto done;
  3610. case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  3611. ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3612. PhyerrThreshold);
  3613. goto done;
  3614. case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  3615. /* will only take effect when the link state changes */
  3616. ret = (ppd->cpspec->ibcctrl_a &
  3617. SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
  3618. IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
  3619. goto done;
  3620. case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
  3621. lsb = IBA7322_IBC_HRTBT_LSB;
  3622. maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
  3623. break;
  3624. case QIB_IB_CFG_PMA_TICKS:
  3625. /*
  3626. * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
  3627. * Since the clock is always 250MHz, the value is 3, 1 or 0.
  3628. */
  3629. if (ppd->link_speed_active == QIB_IB_QDR)
  3630. ret = 3;
  3631. else if (ppd->link_speed_active == QIB_IB_DDR)
  3632. ret = 1;
  3633. else
  3634. ret = 0;
  3635. goto done;
  3636. default:
  3637. ret = -EINVAL;
  3638. goto done;
  3639. }
  3640. ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
  3641. done:
  3642. return ret;
  3643. }
  3644. /*
  3645. * Below again cribbed liberally from older version. Do not lean
  3646. * heavily on it.
  3647. */
  3648. #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
  3649. #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
  3650. | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
  3651. static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
  3652. {
  3653. struct qib_devdata *dd = ppd->dd;
  3654. u64 maskr; /* right-justified mask */
  3655. int lsb, ret = 0;
  3656. u16 lcmd, licmd;
  3657. unsigned long flags;
  3658. switch (which) {
  3659. case QIB_IB_CFG_LIDLMC:
  3660. /*
  3661. * Set LID and LMC. Combined to avoid possible hazard
  3662. * caller puts LMC in 16MSbits, DLID in 16LSbits of val
  3663. */
  3664. lsb = IBA7322_IBC_DLIDLMC_SHIFT;
  3665. maskr = IBA7322_IBC_DLIDLMC_MASK;
  3666. /*
  3667. * For header-checking, the SLID in the packet will
  3668. * be masked with SendIBSLMCMask, and compared
  3669. * with SendIBSLIDAssignMask. Make sure we do not
  3670. * set any bits not covered by the mask, or we get
  3671. * false-positives.
  3672. */
  3673. qib_write_kreg_port(ppd, krp_sendslid,
  3674. val & (val >> 16) & SendIBSLIDAssignMask);
  3675. qib_write_kreg_port(ppd, krp_sendslidmask,
  3676. (val >> 16) & SendIBSLMCMask);
  3677. break;
  3678. case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
  3679. ppd->link_width_enabled = val;
  3680. /* convert IB value to chip register value */
  3681. if (val == IB_WIDTH_1X)
  3682. val = 0;
  3683. else if (val == IB_WIDTH_4X)
  3684. val = 1;
  3685. else
  3686. val = 3;
  3687. maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
  3688. lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
  3689. break;
  3690. case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
  3691. /*
  3692. * As with width, only write the actual register if the
  3693. * link is currently down, otherwise takes effect on next
  3694. * link change. Since setting is being explicitly requested
  3695. * (via MAD or sysfs), clear autoneg failure status if speed
  3696. * autoneg is enabled.
  3697. */
  3698. ppd->link_speed_enabled = val;
  3699. val <<= IBA7322_IBC_SPEED_LSB;
  3700. maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
  3701. IBA7322_IBC_MAX_SPEED_MASK;
  3702. if (val & (val - 1)) {
3703. /* Multiple speeds enabled */
  3704. val |= IBA7322_IBC_IBTA_1_2_MASK |
  3705. IBA7322_IBC_MAX_SPEED_MASK;
  3706. spin_lock_irqsave(&ppd->lflags_lock, flags);
  3707. ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
  3708. spin_unlock_irqrestore(&ppd->lflags_lock, flags);
  3709. } else if (val & IBA7322_IBC_SPEED_QDR)
  3710. val |= IBA7322_IBC_IBTA_1_2_MASK;
  3711. /* IBTA 1.2 mode + min/max + speed bits are contiguous */
  3712. lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
  3713. break;
  3714. case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
  3715. lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3716. maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
  3717. break;
  3718. case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
  3719. lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3720. maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
  3721. break;
  3722. case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  3723. maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3724. OverrunThreshold);
  3725. if (maskr != val) {
  3726. ppd->cpspec->ibcctrl_a &=
  3727. ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
  3728. ppd->cpspec->ibcctrl_a |= (u64) val <<
  3729. SYM_LSB(IBCCtrlA_0, OverrunThreshold);
  3730. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3731. ppd->cpspec->ibcctrl_a);
  3732. qib_write_kreg(dd, kr_scratch, 0ULL);
  3733. }
  3734. goto bail;
  3735. case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  3736. maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
  3737. PhyerrThreshold);
  3738. if (maskr != val) {
  3739. ppd->cpspec->ibcctrl_a &=
  3740. ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
  3741. ppd->cpspec->ibcctrl_a |= (u64) val <<
  3742. SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
  3743. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3744. ppd->cpspec->ibcctrl_a);
  3745. qib_write_kreg(dd, kr_scratch, 0ULL);
  3746. }
  3747. goto bail;
  3748. case QIB_IB_CFG_PKEYS: /* update pkeys */
  3749. maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
  3750. ((u64) ppd->pkeys[2] << 32) |
  3751. ((u64) ppd->pkeys[3] << 48);
  3752. qib_write_kreg_port(ppd, krp_partitionkey, maskr);
  3753. goto bail;
  3754. case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  3755. /* will only take effect when the link state changes */
  3756. if (val == IB_LINKINITCMD_POLL)
  3757. ppd->cpspec->ibcctrl_a &=
  3758. ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
  3759. else /* SLEEP */
  3760. ppd->cpspec->ibcctrl_a |=
  3761. SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
  3762. qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
  3763. qib_write_kreg(dd, kr_scratch, 0ULL);
  3764. goto bail;
  3765. case QIB_IB_CFG_MTU: /* update the MTU in IBC */
  3766. /*
  3767. * Update our housekeeping variables, and set IBC max
  3768. * size, same as init code; max IBC is max we allow in
  3769. * buffer, less the qword pbc, plus 1 for ICRC, in dwords
  3770. * Set even if it's unchanged, print debug message only
  3771. * on changes.
  3772. */
  3773. val = (ppd->ibmaxlen >> 2) + 1;
  3774. ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
  3775. ppd->cpspec->ibcctrl_a |= (u64)val <<
  3776. SYM_LSB(IBCCtrlA_0, MaxPktLen);
  3777. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3778. ppd->cpspec->ibcctrl_a);
  3779. qib_write_kreg(dd, kr_scratch, 0ULL);
  3780. goto bail;
  3781. case QIB_IB_CFG_LSTATE: /* set the IB link state */
  3782. switch (val & 0xffff0000) {
  3783. case IB_LINKCMD_DOWN:
  3784. lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
  3785. ppd->cpspec->ibmalfusesnap = 1;
  3786. ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
  3787. crp_errlink);
  3788. if (!ppd->cpspec->ibdeltainprog &&
  3789. qib_compat_ddr_negotiate) {
  3790. ppd->cpspec->ibdeltainprog = 1;
  3791. ppd->cpspec->ibsymsnap =
  3792. read_7322_creg32_port(ppd,
  3793. crp_ibsymbolerr);
  3794. ppd->cpspec->iblnkerrsnap =
  3795. read_7322_creg32_port(ppd,
  3796. crp_iblinkerrrecov);
  3797. }
  3798. break;
  3799. case IB_LINKCMD_ARMED:
  3800. lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
  3801. if (ppd->cpspec->ibmalfusesnap) {
  3802. ppd->cpspec->ibmalfusesnap = 0;
  3803. ppd->cpspec->ibmalfdelta +=
  3804. read_7322_creg32_port(ppd,
  3805. crp_errlink) -
  3806. ppd->cpspec->ibmalfsnap;
  3807. }
  3808. break;
  3809. case IB_LINKCMD_ACTIVE:
  3810. lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
  3811. break;
  3812. default:
  3813. ret = -EINVAL;
  3814. qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
  3815. goto bail;
  3816. }
  3817. switch (val & 0xffff) {
  3818. case IB_LINKINITCMD_NOP:
  3819. licmd = 0;
  3820. break;
  3821. case IB_LINKINITCMD_POLL:
  3822. licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
  3823. break;
  3824. case IB_LINKINITCMD_SLEEP:
  3825. licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
  3826. break;
  3827. case IB_LINKINITCMD_DISABLE:
  3828. licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
  3829. ppd->cpspec->chase_end = 0;
  3830. /*
  3831. * stop state chase counter and timer, if running.
3832. * wait for pending timer, but don't clear .data (ppd)!
  3833. */
  3834. if (ppd->cpspec->chase_timer.expires) {
  3835. del_timer_sync(&ppd->cpspec->chase_timer);
  3836. ppd->cpspec->chase_timer.expires = 0;
  3837. }
  3838. break;
  3839. default:
  3840. ret = -EINVAL;
  3841. qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
  3842. val & 0xffff);
  3843. goto bail;
  3844. }
  3845. qib_set_ib_7322_lstate(ppd, lcmd, licmd);
  3846. goto bail;
  3847. case QIB_IB_CFG_OP_VLS:
  3848. if (ppd->vls_operational != val) {
  3849. ppd->vls_operational = val;
  3850. set_vls(ppd);
  3851. }
  3852. goto bail;
  3853. case QIB_IB_CFG_VL_HIGH_LIMIT:
  3854. qib_write_kreg_port(ppd, krp_highprio_limit, val);
  3855. goto bail;
  3856. case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
  3857. if (val > 3) {
  3858. ret = -EINVAL;
  3859. goto bail;
  3860. }
  3861. lsb = IBA7322_IBC_HRTBT_LSB;
  3862. maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
  3863. break;
  3864. case QIB_IB_CFG_PORT:
  3865. /* val is the port number of the switch we are connected to. */
  3866. if (ppd->dd->cspec->r1) {
  3867. cancel_delayed_work(&ppd->cpspec->ipg_work);
  3868. ppd->cpspec->ipg_tries = 0;
  3869. }
  3870. goto bail;
  3871. default:
  3872. ret = -EINVAL;
  3873. goto bail;
  3874. }
  3875. ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
  3876. ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
  3877. qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
  3878. qib_write_kreg(dd, kr_scratch, 0);
  3879. bail:
  3880. return ret;
  3881. }
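/*
 * Illustrative sketch (not driver code): both qib_7322_get_ib_cfg() and
 * qib_7322_set_ib_cfg() describe an IBCCtrlB field by its LSB position and
 * a right-justified mask, so one extract and one read-modify-write cover
 * every case that falls through to the bottom of the switch:
 *
 *	static u64 field_get(u64 reg, int lsb, u64 rmask)
 *	{
 *		return (reg >> lsb) & rmask;
 *	}
 *
 *	static u64 field_set(u64 reg, int lsb, u64 rmask, u64 val)
 *	{
 *		reg &= ~(rmask << lsb);		// clear the old field
 *		reg |= (val & rmask) << lsb;	// insert the new value
 *		return reg;
 *	}
 */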
  3882. static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
  3883. {
  3884. int ret = 0;
  3885. u64 val, ctrlb;
  3886. /* only IBC loopback, may add serdes and xgxs loopbacks later */
  3887. if (!strncmp(what, "ibc", 3)) {
  3888. ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
  3889. Loopback);
  3890. val = 0; /* disable heart beat, so link will come up */
  3891. qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
  3892. ppd->dd->unit, ppd->port);
  3893. } else if (!strncmp(what, "off", 3)) {
  3894. ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
  3895. Loopback);
  3896. /* enable heart beat again */
  3897. val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
  3898. qib_devinfo(ppd->dd->pcidev,
  3899. "Disabling IB%u:%u IBC loopback (normal)\n",
  3900. ppd->dd->unit, ppd->port);
  3901. } else
  3902. ret = -EINVAL;
  3903. if (!ret) {
  3904. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  3905. ppd->cpspec->ibcctrl_a);
  3906. ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
  3907. << IBA7322_IBC_HRTBT_LSB);
  3908. ppd->cpspec->ibcctrl_b = ctrlb | val;
  3909. qib_write_kreg_port(ppd, krp_ibcctrl_b,
  3910. ppd->cpspec->ibcctrl_b);
  3911. qib_write_kreg(ppd->dd, kr_scratch, 0);
  3912. }
  3913. return ret;
  3914. }
  3915. static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
  3916. struct ib_vl_weight_elem *vl)
  3917. {
  3918. unsigned i;
  3919. for (i = 0; i < 16; i++, regno++, vl++) {
  3920. u32 val = qib_read_kreg_port(ppd, regno);
  3921. vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
  3922. SYM_RMASK(LowPriority0_0, VirtualLane);
  3923. vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
  3924. SYM_RMASK(LowPriority0_0, Weight);
  3925. }
  3926. }
  3927. static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
  3928. struct ib_vl_weight_elem *vl)
  3929. {
  3930. unsigned i;
  3931. for (i = 0; i < 16; i++, regno++, vl++) {
  3932. u64 val;
  3933. val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
  3934. SYM_LSB(LowPriority0_0, VirtualLane)) |
  3935. ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
  3936. SYM_LSB(LowPriority0_0, Weight));
  3937. qib_write_kreg_port(ppd, regno, val);
  3938. }
  3939. if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
  3940. struct qib_devdata *dd = ppd->dd;
  3941. unsigned long flags;
  3942. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  3943. ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
  3944. qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
  3945. qib_write_kreg(dd, kr_scratch, 0);
  3946. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  3947. }
  3948. }
  3949. static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
  3950. {
  3951. switch (which) {
  3952. case QIB_IB_TBL_VL_HIGH_ARB:
  3953. get_vl_weights(ppd, krp_highprio_0, t);
  3954. break;
  3955. case QIB_IB_TBL_VL_LOW_ARB:
  3956. get_vl_weights(ppd, krp_lowprio_0, t);
  3957. break;
  3958. default:
  3959. return -EINVAL;
  3960. }
  3961. return 0;
  3962. }
  3963. static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
  3964. {
  3965. switch (which) {
  3966. case QIB_IB_TBL_VL_HIGH_ARB:
  3967. set_vl_weights(ppd, krp_highprio_0, t);
  3968. break;
  3969. case QIB_IB_TBL_VL_LOW_ARB:
  3970. set_vl_weights(ppd, krp_lowprio_0, t);
  3971. break;
  3972. default:
  3973. return -EINVAL;
  3974. }
  3975. return 0;
  3976. }
  3977. static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
  3978. u32 updegr, u32 egrhd, u32 npkts)
  3979. {
  3980. /*
  3981. * Need to write timeout register before updating rcvhdrhead to ensure
  3982. * that the timer is enabled on reception of a packet.
  3983. */
  3984. if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
  3985. adjust_rcv_timeout(rcd, npkts);
  3986. if (updegr)
  3987. qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
  3988. mmiowb();
  3989. qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
  3990. qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
  3991. mmiowb();
  3992. }
  3993. static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
  3994. {
  3995. u32 head, tail;
  3996. head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
  3997. if (rcd->rcvhdrtail_kvaddr)
  3998. tail = qib_get_rcvhdrtail(rcd);
  3999. else
  4000. tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
  4001. return head == tail;
  4002. }
  4003. #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
  4004. QIB_RCVCTRL_CTXT_DIS | \
  4005. QIB_RCVCTRL_TIDFLOW_ENB | \
  4006. QIB_RCVCTRL_TIDFLOW_DIS | \
  4007. QIB_RCVCTRL_TAILUPD_ENB | \
  4008. QIB_RCVCTRL_TAILUPD_DIS | \
  4009. QIB_RCVCTRL_INTRAVAIL_ENB | \
  4010. QIB_RCVCTRL_INTRAVAIL_DIS | \
  4011. QIB_RCVCTRL_BP_ENB | \
  4012. QIB_RCVCTRL_BP_DIS)
  4013. #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
  4014. QIB_RCVCTRL_CTXT_DIS | \
  4015. QIB_RCVCTRL_PKEY_DIS | \
  4016. QIB_RCVCTRL_PKEY_ENB)
  4017. /*
  4018. * Modify the RCVCTRL register in chip-specific way. This
  4019. * is a function because bit positions and (future) register
4020. * location is chip-specific, but the needed operations are
  4021. * generic. <op> is a bit-mask because we often want to
  4022. * do multiple modifications.
  4023. */
  4024. static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
  4025. int ctxt)
  4026. {
  4027. struct qib_devdata *dd = ppd->dd;
  4028. struct qib_ctxtdata *rcd;
  4029. u64 mask, val;
  4030. unsigned long flags;
  4031. spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
  4032. if (op & QIB_RCVCTRL_TIDFLOW_ENB)
  4033. dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
  4034. if (op & QIB_RCVCTRL_TIDFLOW_DIS)
  4035. dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
  4036. if (op & QIB_RCVCTRL_TAILUPD_ENB)
  4037. dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
  4038. if (op & QIB_RCVCTRL_TAILUPD_DIS)
  4039. dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
  4040. if (op & QIB_RCVCTRL_PKEY_ENB)
  4041. ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
  4042. if (op & QIB_RCVCTRL_PKEY_DIS)
  4043. ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
  4044. if (ctxt < 0) {
  4045. mask = (1ULL << dd->ctxtcnt) - 1;
  4046. rcd = NULL;
  4047. } else {
  4048. mask = (1ULL << ctxt);
  4049. rcd = dd->rcd[ctxt];
  4050. }
  4051. if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
  4052. ppd->p_rcvctrl |=
  4053. (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
  4054. if (!(dd->flags & QIB_NODMA_RTAIL)) {
  4055. op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
  4056. dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
  4057. }
  4058. /* Write these registers before the context is enabled. */
  4059. qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
  4060. rcd->rcvhdrqtailaddr_phys);
  4061. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
  4062. rcd->rcvhdrq_phys);
  4063. rcd->seq_cnt = 1;
  4064. }
  4065. if (op & QIB_RCVCTRL_CTXT_DIS)
  4066. ppd->p_rcvctrl &=
  4067. ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
  4068. if (op & QIB_RCVCTRL_BP_ENB)
  4069. dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
  4070. if (op & QIB_RCVCTRL_BP_DIS)
  4071. dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
  4072. if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
  4073. dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
  4074. if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
  4075. dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
  4076. /*
  4077. * Decide which registers to write depending on the ops enabled.
  4078. * Special case is "flush" (no bits set at all)
  4079. * which needs to write both.
  4080. */
  4081. if (op == 0 || (op & RCVCTRL_COMMON_MODS))
  4082. qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
  4083. if (op == 0 || (op & RCVCTRL_PORT_MODS))
  4084. qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
  4085. if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
  4086. /*
  4087. * Init the context registers also; if we were
  4088. * disabled, tail and head should both be zero
  4089. * already from the enable, but since we don't
  4090. * know, we have to do it explicitly.
  4091. */
  4092. val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
  4093. qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
  4094. /* be sure enabling write seen; hd/tl should be 0 */
  4095. (void) qib_read_kreg32(dd, kr_scratch);
  4096. val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
  4097. dd->rcd[ctxt]->head = val;
  4098. /* If kctxt, interrupt on next receive. */
  4099. if (ctxt < dd->first_user_ctxt)
  4100. val |= dd->rhdrhead_intr_off;
  4101. qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
  4102. } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
  4103. dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
  4104. /* arm rcv interrupt */
  4105. val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
  4106. qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
  4107. }
  4108. if (op & QIB_RCVCTRL_CTXT_DIS) {
  4109. unsigned f;
  4110. /* Now that the context is disabled, clear these registers. */
  4111. if (ctxt >= 0) {
  4112. qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
  4113. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
  4114. for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
  4115. qib_write_ureg(dd, ur_rcvflowtable + f,
  4116. TIDFLOW_ERRBITS, ctxt);
  4117. } else {
  4118. unsigned i;
  4119. for (i = 0; i < dd->cfgctxts; i++) {
  4120. qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
  4121. i, 0);
  4122. qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
  4123. for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
  4124. qib_write_ureg(dd, ur_rcvflowtable + f,
  4125. TIDFLOW_ERRBITS, i);
  4126. }
  4127. }
  4128. }
  4129. spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
  4130. }
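/*
 * Illustrative sketch (not driver code) of the context selection above: a
 * negative ctxt means "all contexts", so the per-context enable bits are
 * built as a mask before being shifted into the RcvCtrl field.
 *
 *	static u64 ctxt_mask(int ctxt, unsigned ctxtcnt)
 *	{
 *		if (ctxt < 0)
 *			return (1ULL << ctxtcnt) - 1;	// all contexts
 *		return 1ULL << ctxt;			// just this one
 *	}
 */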
  4131. /*
  4132. * Modify the SENDCTRL register in chip-specific way. This
  4133. * is a function where there are multiple such registers with
  4134. * slightly different layouts.
  4135. * The chip doesn't allow back-to-back sendctrl writes, so write
  4136. * the scratch register after writing sendctrl.
  4137. *
  4138. * Which register is written depends on the operation.
  4139. * Most operate on the common register, while
  4140. * SEND_ENB and SEND_DIS operate on the per-port ones.
  4141. * SEND_ENB is included in common because it can change SPCL_TRIG
  4142. */
  4143. #define SENDCTRL_COMMON_MODS (\
  4144. QIB_SENDCTRL_CLEAR | \
  4145. QIB_SENDCTRL_AVAIL_DIS | \
  4146. QIB_SENDCTRL_AVAIL_ENB | \
  4147. QIB_SENDCTRL_AVAIL_BLIP | \
  4148. QIB_SENDCTRL_DISARM | \
  4149. QIB_SENDCTRL_DISARM_ALL | \
  4150. QIB_SENDCTRL_SEND_ENB)
  4151. #define SENDCTRL_PORT_MODS (\
  4152. QIB_SENDCTRL_CLEAR | \
  4153. QIB_SENDCTRL_SEND_ENB | \
  4154. QIB_SENDCTRL_SEND_DIS | \
  4155. QIB_SENDCTRL_FLUSH)
  4156. static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
  4157. {
  4158. struct qib_devdata *dd = ppd->dd;
  4159. u64 tmp_dd_sendctrl;
  4160. unsigned long flags;
  4161. spin_lock_irqsave(&dd->sendctrl_lock, flags);
  4162. /* First the dd ones that are "sticky", saved in shadow */
  4163. if (op & QIB_SENDCTRL_CLEAR)
  4164. dd->sendctrl = 0;
  4165. if (op & QIB_SENDCTRL_AVAIL_DIS)
  4166. dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
  4167. else if (op & QIB_SENDCTRL_AVAIL_ENB) {
  4168. dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
  4169. if (dd->flags & QIB_USE_SPCL_TRIG)
  4170. dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
  4171. }
  4172. /* Then the ppd ones that are "sticky", saved in shadow */
  4173. if (op & QIB_SENDCTRL_SEND_DIS)
  4174. ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
  4175. else if (op & QIB_SENDCTRL_SEND_ENB)
  4176. ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
  4177. if (op & QIB_SENDCTRL_DISARM_ALL) {
  4178. u32 i, last;
  4179. tmp_dd_sendctrl = dd->sendctrl;
  4180. last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
  4181. /*
  4182. * Disarm any buffers that are not yet launched,
  4183. * disabling updates until done.
  4184. */
  4185. tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
  4186. for (i = 0; i < last; i++) {
  4187. qib_write_kreg(dd, kr_sendctrl,
  4188. tmp_dd_sendctrl |
  4189. SYM_MASK(SendCtrl, Disarm) | i);
  4190. qib_write_kreg(dd, kr_scratch, 0);
  4191. }
  4192. }
  4193. if (op & QIB_SENDCTRL_FLUSH) {
  4194. u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
  4195. /*
  4196. * Now drain all the fifos. The Abort bit should never be
  4197. * needed, so for now, at least, we don't use it.
  4198. */
  4199. tmp_ppd_sendctrl |=
  4200. SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
  4201. SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
  4202. SYM_MASK(SendCtrl_0, TxeBypassIbc);
  4203. qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
  4204. qib_write_kreg(dd, kr_scratch, 0);
  4205. }
  4206. tmp_dd_sendctrl = dd->sendctrl;
  4207. if (op & QIB_SENDCTRL_DISARM)
  4208. tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
  4209. ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
  4210. SYM_LSB(SendCtrl, DisarmSendBuf));
  4211. if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
  4212. (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
  4213. tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
  4214. if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
  4215. qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
  4216. qib_write_kreg(dd, kr_scratch, 0);
  4217. }
  4218. if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
  4219. qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
  4220. qib_write_kreg(dd, kr_scratch, 0);
  4221. }
  4222. if (op & QIB_SENDCTRL_AVAIL_BLIP) {
  4223. qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
  4224. qib_write_kreg(dd, kr_scratch, 0);
  4225. }
  4226. spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
  4227. if (op & QIB_SENDCTRL_FLUSH) {
  4228. u32 v;
  4229. /*
  4230. * ensure writes have hit chip, then do a few
  4231. * more reads, to allow DMA of pioavail registers
  4232. * to occur, so in-memory copy is in sync with
  4233. * the chip. Not always safe to sleep.
  4234. */
  4235. v = qib_read_kreg32(dd, kr_scratch);
  4236. qib_write_kreg(dd, kr_scratch, v);
  4237. v = qib_read_kreg32(dd, kr_scratch);
  4238. qib_write_kreg(dd, kr_scratch, v);
  4239. qib_read_kreg32(dd, kr_scratch);
  4240. }
  4241. }
  4242. #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
  4243. #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
  4244. #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
  4245. /**
  4246. * qib_portcntr_7322 - read a per-port chip counter
  4247. * @ppd: the qlogic_ib pport
4248. * @reg: the counter to read (not a chip offset)
  4249. */
  4250. static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
  4251. {
  4252. struct qib_devdata *dd = ppd->dd;
  4253. u64 ret = 0ULL;
  4254. u16 creg;
  4255. /* 0xffff for unimplemented or synthesized counters */
  4256. static const u32 xlator[] = {
  4257. [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
  4258. [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
  4259. [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
  4260. [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
  4261. [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
  4262. [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
  4263. [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
  4264. [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
  4265. [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
  4266. [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
  4267. [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
  4268. [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
  4269. [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
  4270. [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
  4271. [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
  4272. [QIBPORTCNTR_ERRICRC] = crp_erricrc,
  4273. [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
  4274. [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
  4275. [QIBPORTCNTR_BADFORMAT] = crp_badformat,
  4276. [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
  4277. [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
  4278. [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
  4279. [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
  4280. [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
  4281. [QIBPORTCNTR_ERRLINK] = crp_errlink,
  4282. [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
  4283. [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
  4284. [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
  4285. [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
  4286. [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
  4287. /*
  4288. * the next 3 aren't really counters, but were implemented
  4289. * as counters in older chips, so still get accessed as
  4290. * though they were counters from this code.
  4291. */
  4292. [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
  4293. [QIBPORTCNTR_PSSTART] = krp_psstart,
  4294. [QIBPORTCNTR_PSSTAT] = krp_psstat,
  4295. /* pseudo-counter, summed for all ports */
  4296. [QIBPORTCNTR_KHDROVFL] = 0xffff,
  4297. };
  4298. if (reg >= ARRAY_SIZE(xlator)) {
  4299. qib_devinfo(ppd->dd->pcidev,
  4300. "Unimplemented portcounter %u\n", reg);
  4301. goto done;
  4302. }
  4303. creg = xlator[reg] & _PORT_CNTR_IDXMASK;
  4304. /* handle non-counters and special cases first */
  4305. if (reg == QIBPORTCNTR_KHDROVFL) {
  4306. int i;
  4307. /* sum over all kernel contexts (skip if mini_init) */
  4308. for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
  4309. struct qib_ctxtdata *rcd = dd->rcd[i];
  4310. if (!rcd || rcd->ppd != ppd)
  4311. continue;
  4312. ret += read_7322_creg32(dd, cr_base_egrovfl + i);
  4313. }
  4314. goto done;
  4315. } else if (reg == QIBPORTCNTR_RXDROPPKT) {
  4316. /*
  4317. * Used as part of the synthesis of port_rcv_errors
  4318. * in the verbs code for IBTA counters. Not needed for 7322,
  4319. * because all the errors are already counted by other cntrs.
  4320. */
  4321. goto done;
  4322. } else if (reg == QIBPORTCNTR_PSINTERVAL ||
  4323. reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
  4324. /* were counters in older chips, now per-port kernel regs */
  4325. ret = qib_read_kreg_port(ppd, creg);
  4326. goto done;
  4327. }
  4328. /*
  4329. * Only fast increment counters are 64 bits; use 32 bit reads to
  4330. * avoid two independent reads when on Opteron.
  4331. */
  4332. if (xlator[reg] & _PORT_64BIT_FLAG)
  4333. ret = read_7322_creg_port(ppd, creg);
  4334. else
  4335. ret = read_7322_creg32_port(ppd, creg);
  4336. if (creg == crp_ibsymbolerr) {
  4337. if (ppd->cpspec->ibdeltainprog)
  4338. ret -= ret - ppd->cpspec->ibsymsnap;
  4339. ret -= ppd->cpspec->ibsymdelta;
  4340. } else if (creg == crp_iblinkerrrecov) {
  4341. if (ppd->cpspec->ibdeltainprog)
  4342. ret -= ret - ppd->cpspec->iblnkerrsnap;
  4343. ret -= ppd->cpspec->iblnkerrdelta;
  4344. } else if (creg == crp_errlink)
  4345. ret -= ppd->cpspec->ibmalfdelta;
  4346. else if (creg == crp_iblinkdown)
  4347. ret += ppd->cpspec->iblnkdowndelta;
  4348. done:
  4349. return ret;
  4350. }
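/*
 * Illustrative sketch (not driver code) of the xlator[] encoding above:
 * flag bits ORed into the high bits of each entry let one u32 carry both
 * the counter index and how to read it.  The reader helpers here are
 * placeholders for read_7322_creg_port()/read_7322_creg32_port().
 *
 *	enum {
 *		CNTR_64BIT = 0x10000U,	// like _PORT_64BIT_FLAG
 *		CNTR_IDX   = 0x7fffU,	// like _PORT_CNTR_IDXMASK
 *	};
 *
 *	static u64 read_cntr(u32 entry)
 *	{
 *		u32 idx = entry & CNTR_IDX;
 *
 *		// fast-increment counters are 64 bits, the rest 32
 *		return (entry & CNTR_64BIT) ? read_creg64(idx)
 *					    : read_creg32(idx);
 *	}
 */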
  4351. /*
  4352. * Device counter names (not port-specific), one line per stat,
  4353. * single string. Used by utilities like ipathstats to print the stats
  4354. * in a way which works for different versions of drivers, without changing
  4355. * the utility. Names need to be 12 chars or less (w/o newline), for proper
  4356. * display by utility.
  4357. * Non-error counters are first.
4358. * Start of "error" counters is indicated by a leading "E " on the first
  4359. * "error" counter, and doesn't count in label length.
  4360. * The EgrOvfl list needs to be last so we truncate them at the configured
  4361. * context count for the device.
  4362. * cntr7322indices contains the corresponding register indices.
  4363. */
  4364. static const char cntr7322names[] =
  4365. "Interrupts\n"
  4366. "HostBusStall\n"
  4367. "E RxTIDFull\n"
  4368. "RxTIDInvalid\n"
  4369. "RxTIDFloDrop\n" /* 7322 only */
  4370. "Ctxt0EgrOvfl\n"
  4371. "Ctxt1EgrOvfl\n"
  4372. "Ctxt2EgrOvfl\n"
  4373. "Ctxt3EgrOvfl\n"
  4374. "Ctxt4EgrOvfl\n"
  4375. "Ctxt5EgrOvfl\n"
  4376. "Ctxt6EgrOvfl\n"
  4377. "Ctxt7EgrOvfl\n"
  4378. "Ctxt8EgrOvfl\n"
  4379. "Ctxt9EgrOvfl\n"
  4380. "Ctx10EgrOvfl\n"
  4381. "Ctx11EgrOvfl\n"
  4382. "Ctx12EgrOvfl\n"
  4383. "Ctx13EgrOvfl\n"
  4384. "Ctx14EgrOvfl\n"
  4385. "Ctx15EgrOvfl\n"
  4386. "Ctx16EgrOvfl\n"
  4387. "Ctx17EgrOvfl\n"
  4388. ;
  4389. static const u32 cntr7322indices[] = {
  4390. cr_lbint | _PORT_64BIT_FLAG,
  4391. cr_lbstall | _PORT_64BIT_FLAG,
  4392. cr_tidfull,
  4393. cr_tidinvalid,
  4394. cr_rxtidflowdrop,
  4395. cr_base_egrovfl + 0,
  4396. cr_base_egrovfl + 1,
  4397. cr_base_egrovfl + 2,
  4398. cr_base_egrovfl + 3,
  4399. cr_base_egrovfl + 4,
  4400. cr_base_egrovfl + 5,
  4401. cr_base_egrovfl + 6,
  4402. cr_base_egrovfl + 7,
  4403. cr_base_egrovfl + 8,
  4404. cr_base_egrovfl + 9,
  4405. cr_base_egrovfl + 10,
  4406. cr_base_egrovfl + 11,
  4407. cr_base_egrovfl + 12,
  4408. cr_base_egrovfl + 13,
  4409. cr_base_egrovfl + 14,
  4410. cr_base_egrovfl + 15,
  4411. cr_base_egrovfl + 16,
  4412. cr_base_egrovfl + 17,
  4413. };
  4414. /*
  4415. * same as cntr7322names and cntr7322indices, but for port-specific counters.
  4416. * portcntr7322indices is somewhat complicated by some registers needing
  4417. * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
  4418. */
  4419. static const char portcntr7322names[] =
  4420. "TxPkt\n"
  4421. "TxFlowPkt\n"
  4422. "TxWords\n"
  4423. "RxPkt\n"
  4424. "RxFlowPkt\n"
  4425. "RxWords\n"
  4426. "TxFlowStall\n"
  4427. "TxDmaDesc\n" /* 7220 and 7322-only */
  4428. "E RxDlidFltr\n" /* 7220 and 7322-only */
  4429. "IBStatusChng\n"
  4430. "IBLinkDown\n"
  4431. "IBLnkRecov\n"
  4432. "IBRxLinkErr\n"
  4433. "IBSymbolErr\n"
  4434. "RxLLIErr\n"
  4435. "RxBadFormat\n"
  4436. "RxBadLen\n"
  4437. "RxBufOvrfl\n"
  4438. "RxEBP\n"
  4439. "RxFlowCtlErr\n"
  4440. "RxICRCerr\n"
  4441. "RxLPCRCerr\n"
  4442. "RxVCRCerr\n"
  4443. "RxInvalLen\n"
  4444. "RxInvalPKey\n"
  4445. "RxPktDropped\n"
  4446. "TxBadLength\n"
  4447. "TxDropped\n"
  4448. "TxInvalLen\n"
  4449. "TxUnderrun\n"
  4450. "TxUnsupVL\n"
  4451. "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
  4452. "RxVL15Drop\n"
  4453. "RxVlErr\n"
  4454. "XcessBufOvfl\n"
  4455. "RxQPBadCtxt\n" /* 7322-only from here down */
  4456. "TXBadHeader\n"
  4457. ;
  4458. static const u32 portcntr7322indices[] = {
  4459. QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
  4460. crp_pktsendflow,
  4461. QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
  4462. QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
  4463. crp_pktrcvflowctrl,
  4464. QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
  4465. QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
  4466. crp_txsdmadesc | _PORT_64BIT_FLAG,
  4467. crp_rxdlidfltr,
  4468. crp_ibstatuschange,
  4469. QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
  4470. QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
  4471. QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
  4472. QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
  4473. QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
  4474. QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
  4475. QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
  4476. QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
  4477. QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
  4478. crp_rcvflowctrlviol,
  4479. QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
  4480. QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
  4481. QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
  4482. QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
  4483. QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
  4484. QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
  4485. crp_txminmaxlenerr,
  4486. crp_txdroppedpkt,
  4487. crp_txlenerr,
  4488. crp_txunderrun,
  4489. crp_txunsupvl,
  4490. QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
  4491. QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
  4492. QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
  4493. QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
  4494. crp_rxqpinvalidctxt,
  4495. crp_txhdrerr,
  4496. };
  4497. /* do all the setup to make the counter reads efficient later */
  4498. static void init_7322_cntrnames(struct qib_devdata *dd)
  4499. {
  4500. int i, j = 0;
  4501. char *s;
  4502. for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
  4503. i++) {
  4504. /* we always have at least one counter before the egrovfl */
  4505. if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
  4506. j = 1;
  4507. s = strchr(s + 1, '\n');
  4508. if (s && j)
  4509. j++;
  4510. }
  4511. dd->cspec->ncntrs = i;
  4512. if (!s)
  4513. /* full list; size is without terminating null */
  4514. dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
  4515. else
  4516. dd->cspec->cntrnamelen = 1 + s - cntr7322names;
  4517. dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
  4518. * sizeof(u64), GFP_KERNEL);
  4519. if (!dd->cspec->cntrs)
  4520. qib_dev_err(dd, "Failed allocation for counters\n");
  4521. for (i = 0, s = (char *)portcntr7322names; s; i++)
  4522. s = strchr(s + 1, '\n');
  4523. dd->cspec->nportcntrs = i - 1;
  4524. dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
  4525. for (i = 0; i < dd->num_pports; ++i) {
  4526. dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
  4527. * sizeof(u64), GFP_KERNEL);
  4528. if (!dd->pport[i].cpspec->portcntrs)
  4529. qib_dev_err(dd,
  4530. "Failed allocation for portcounters\n");
  4531. }
  4532. }
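/*
 * Illustrative sketch (not driver code) of the name-list walk above: the
 * counter names are one '\n'-terminated label per counter in a single
 * string, so counting counters amounts to counting newlines with strchr();
 * the device list is additionally truncated at the per-context EgrOvfl
 * labels beyond dd->cfgctxts.
 *
 *	#include <string.h>
 *
 *	static int count_labels(const char *names)
 *	{
 *		const char *s = names;
 *		int n = 0;
 *
 *		while ((s = strchr(s, '\n')) != NULL) {
 *			n++;
 *			s++;	// step past the newline
 *		}
 *		return n;
 *	}
 */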
  4533. static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
  4534. u64 **cntrp)
  4535. {
  4536. u32 ret;
  4537. if (namep) {
  4538. ret = dd->cspec->cntrnamelen;
  4539. if (pos >= ret)
  4540. ret = 0; /* final read after getting everything */
  4541. else
  4542. *namep = (char *) cntr7322names;
  4543. } else {
  4544. u64 *cntr = dd->cspec->cntrs;
  4545. int i;
  4546. ret = dd->cspec->ncntrs * sizeof(u64);
  4547. if (!cntr || pos >= ret) {
  4548. /* everything read, or couldn't get memory */
  4549. ret = 0;
  4550. goto done;
  4551. }
  4552. *cntrp = cntr;
  4553. for (i = 0; i < dd->cspec->ncntrs; i++)
  4554. if (cntr7322indices[i] & _PORT_64BIT_FLAG)
  4555. *cntr++ = read_7322_creg(dd,
  4556. cntr7322indices[i] &
  4557. _PORT_CNTR_IDXMASK);
  4558. else
  4559. *cntr++ = read_7322_creg32(dd,
  4560. cntr7322indices[i]);
  4561. }
  4562. done:
  4563. return ret;
  4564. }
  4565. static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
  4566. char **namep, u64 **cntrp)
  4567. {
  4568. u32 ret;
  4569. if (namep) {
  4570. ret = dd->cspec->portcntrnamelen;
  4571. if (pos >= ret)
  4572. ret = 0; /* final read after getting everything */
  4573. else
  4574. *namep = (char *)portcntr7322names;
  4575. } else {
  4576. struct qib_pportdata *ppd = &dd->pport[port];
  4577. u64 *cntr = ppd->cpspec->portcntrs;
  4578. int i;
  4579. ret = dd->cspec->nportcntrs * sizeof(u64);
  4580. if (!cntr || pos >= ret) {
  4581. /* everything read, or couldn't get memory */
  4582. ret = 0;
  4583. goto done;
  4584. }
  4585. *cntrp = cntr;
  4586. for (i = 0; i < dd->cspec->nportcntrs; i++) {
  4587. if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
  4588. *cntr++ = qib_portcntr_7322(ppd,
  4589. portcntr7322indices[i] &
  4590. _PORT_CNTR_IDXMASK);
  4591. else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
  4592. *cntr++ = read_7322_creg_port(ppd,
  4593. portcntr7322indices[i] &
  4594. _PORT_CNTR_IDXMASK);
  4595. else
  4596. *cntr++ = read_7322_creg32_port(ppd,
  4597. portcntr7322indices[i]);
  4598. }
  4599. }
  4600. done:
  4601. return ret;
  4602. }
  4603. /**
  4604. * qib_get_7322_faststats - get word counters from chip before they overflow
4605. * @opaque: contains a pointer to the qlogic_ib device qib_devdata
4606. *
4607. * VESTIGIAL: IBA7322 has no "small fast counters", so the only
4608. * real purpose of this function is to maintain the notion of
4609. * "active time", which in turn is only logged into the eeprom,
4610. * which we don't have, yet, for 7322-based boards.
  4611. *
  4612. * called from add_timer
  4613. */
  4614. static void qib_get_7322_faststats(unsigned long opaque)
  4615. {
  4616. struct qib_devdata *dd = (struct qib_devdata *) opaque;
  4617. struct qib_pportdata *ppd;
  4618. unsigned long flags;
  4619. u64 traffic_wds;
  4620. int pidx;
  4621. for (pidx = 0; pidx < dd->num_pports; ++pidx) {
  4622. ppd = dd->pport + pidx;
  4623. /*
4624. * If the port isn't enabled, isn't operational, or diags are
4625. * running (which can cause memory diags to fail), skip this
4626. * port this time.
  4627. */
  4628. if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
  4629. || dd->diag_client)
  4630. continue;
  4631. /*
  4632. * Maintain an activity timer, based on traffic
  4633. * exceeding a threshold, so we need to check the word-counts
  4634. * even if they are 64-bit.
  4635. */
  4636. traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
  4637. qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
  4638. spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
  4639. traffic_wds -= ppd->dd->traffic_wds;
  4640. ppd->dd->traffic_wds += traffic_wds;
  4641. spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
  4642. if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
  4643. QIB_IB_QDR) &&
  4644. (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
  4645. QIBL_LINKACTIVE)) &&
  4646. ppd->cpspec->qdr_dfe_time &&
  4647. time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
  4648. ppd->cpspec->qdr_dfe_on = 0;
  4649. qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
  4650. ppd->dd->cspec->r1 ?
  4651. QDR_STATIC_ADAPT_INIT_R1 :
  4652. QDR_STATIC_ADAPT_INIT);
  4653. force_h1(ppd);
  4654. }
  4655. }
  4656. mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
  4657. }
  4658. /*
4659. * If we were using MSIx, try to fall back to INTx.
  4660. */
  4661. static int qib_7322_intr_fallback(struct qib_devdata *dd)
  4662. {
  4663. if (!dd->cspec->num_msix_entries)
  4664. return 0; /* already using INTx */
  4665. qib_devinfo(dd->pcidev,
  4666. "MSIx interrupt not detected, trying INTx interrupts\n");
  4667. qib_7322_nomsix(dd);
  4668. qib_enable_intx(dd->pcidev);
  4669. qib_setup_7322_interrupt(dd, 0);
  4670. return 1;
  4671. }
  4672. /*
  4673. * Reset the XGXS (between serdes and IBC). Slightly less intrusive
  4674. * than resetting the IBC or external link state, and useful in some
  4675. * cases to cause some retraining. To do this right, we reset IBC
  4676. * as well, then return to previous state (which may be still in reset)
  4677. * NOTE: some callers of this "know" this writes the current value
4678. * of cpspec->ibcctrl_a as part of its operation, so if that changes,
  4679. * check all callers.
  4680. */
  4681. static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
  4682. {
  4683. u64 val;
  4684. struct qib_devdata *dd = ppd->dd;
  4685. const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
  4686. SYM_MASK(IBPCSConfig_0, xcv_treset) |
  4687. SYM_MASK(IBPCSConfig_0, tx_rx_reset);
  4688. val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
  4689. qib_write_kreg(dd, kr_hwerrmask,
  4690. dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
  4691. qib_write_kreg_port(ppd, krp_ibcctrl_a,
  4692. ppd->cpspec->ibcctrl_a &
  4693. ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
  4694. qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
  4695. qib_read_kreg32(dd, kr_scratch);
  4696. qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
  4697. qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
  4698. qib_write_kreg(dd, kr_scratch, 0ULL);
  4699. qib_write_kreg(dd, kr_hwerrclear,
  4700. SYM_MASK(HwErrClear, statusValidNoEopClear));
  4701. qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
  4702. }
  4703. /*
  4704. * This code for non-IBTA-compliant IB speed negotiation is only known to
  4705. * work for the SDR to DDR transition, and only between an HCA and a switch
  4706. * with recent firmware. It is based on observed heuristics, rather than
  4707. * actual knowledge of the non-compliant speed negotiation.
  4708. * It has a number of hard-coded fields, since the hope is to rewrite this
4709. * when a spec is available on how the negotiation is intended to work.
  4710. */
  4711. static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
  4712. u32 dcnt, u32 *data)
  4713. {
  4714. int i;
  4715. u64 pbc;
  4716. u32 __iomem *piobuf;
  4717. u32 pnum, control, len;
  4718. struct qib_devdata *dd = ppd->dd;
  4719. i = 0;
  4720. len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
  4721. control = qib_7322_setpbc_control(ppd, len, 0, 15);
  4722. pbc = ((u64) control << 32) | len;
  4723. while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
  4724. if (i++ > 15)
  4725. return;
  4726. udelay(2);
  4727. }
  4728. /* disable header check on this packet, since it can't be valid */
  4729. dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
  4730. writeq(pbc, piobuf);
  4731. qib_flush_wc();
  4732. qib_pio_copy(piobuf + 2, hdr, 7);
  4733. qib_pio_copy(piobuf + 9, data, dcnt);
  4734. if (dd->flags & QIB_USE_SPCL_TRIG) {
  4735. u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
  4736. qib_flush_wc();
  4737. __raw_writel(0xaebecede, piobuf + spcl_off);
  4738. }
  4739. qib_flush_wc();
  4740. qib_sendbuf_done(dd, pnum);
  4741. /* and re-enable hdr check */
  4742. dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
  4743. }
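
/*
 * Illustrative sketch (not part of the driver): how the PBC word above
 * is built. The payload size here is hypothetical.
 */
#if 0
static u64 example_autoneg_pbc(u32 control)
{
	u32 dcnt = 0x40;	/* MAD payload dwords, as used above */
	u32 len = 7 + dcnt + 1;	/* header + payload + ICRC, in dwords */

	return ((u64)control << 32) | len; /* control high, length low */
}
#endif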

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
	};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
	};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change. The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down).
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When link has gone down, and autoneg enabled, or autoneg has
 * failed and we give up until next time we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	u64 newctrlb;

	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
					      IBA7322_IBC_IBTA_1_2_MASK |
					      IBA7322_IBC_MAX_SPEED_MASK);

	if (speed & (speed - 1)) /* multiple speeds */
		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
			IBA7322_IBC_IBTA_1_2_MASK |
			IBA7322_IBC_MAX_SPEED_MASK;
	else
		newctrlb |= speed == QIB_IB_QDR ?
			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
			((speed == QIB_IB_DDR ?
			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));

	if (newctrlb == ppd->cpspec->ibcctrl_b)
		return;

	ppd->cpspec->ibcctrl_b = newctrlb;
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
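
/*
 * Note on the test above: "speed & (speed - 1)" is the usual
 * more-than-one-bit-set check, so it is true exactly when the caller
 * passed a combination such as (QIB_IB_SDR | QIB_IB_DDR) rather than
 * a single speed.
 */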

/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT
 */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}

/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			   autoneg_work.work)->ppd;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
			   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			   msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}
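
/*
 * Timeline of the worker above, as coded: up to 2.5 ms of polling for
 * POLLQUIET, a ~90 ms wait (expected to time out) before the first PCS
 * reset, a ~1700 ms wait before the second, then up to 250 ms for the
 * link to train to INIT at DDR; any wakeup of autoneg_wait with
 * QIBL_IB_AUTONEG_INPROG cleared short-circuits to "done".
 */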

/*
 * This routine is used to request IPG set in the QLogic switch.
 * Only called if r1.
 */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->send_agent;
	if (!agent)
		goto retry;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		goto retry;

	if (!ibp->smi_ah) {
		struct ib_ah *ah;

		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
		if (IS_ERR(ah))
			ret = PTR_ERR(ah);
		else {
			send_buf->ah = ah;
			ibp->smi_ah = to_iah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
retry:
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}
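
/*
 * Note: the retry delay above grows exponentially (2 << ipg_tries
 * msec); ipg_7322_work() below stops rescheduling once ipg_tries
 * exceeds 10 or the link leaves the INIT/ARMED/ACTIVE states.
 */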

/*
 * Timeout handler for setting IPG.
 * Only called if r1.
 */
static void ipg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;

	ppd = container_of(work, struct qib_chippport_specific,
			   ipg_work.work)->ppd;
	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
	    && ++ppd->cpspec->ipg_tries <= 10)
		try_7322_ipg(ppd);
}

static u32 qib_7322_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);

	switch (state) {
	case IB_7322_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7322_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7322_L_STATE_ACTIVE:
		/* fall through */
	case IB_7322_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7322_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7322_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
	return qib_7322_physportstate[state];
}

static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	unsigned long flags;
	int mult;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	/* Update our picture of width and speed from chip */
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
		ppd->link_speed_active = QIB_IB_QDR;
		mult = 4;
	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
		ppd->link_speed_active = QIB_IB_DDR;
		mult = 2;
	} else {
		ppd->link_speed_active = QIB_IB_SDR;
		mult = 1;
	}
	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
		ppd->link_width_active = IB_WIDTH_4X;
		mult *= 4;
	} else
		ppd->link_width_active = IB_WIDTH_1X;
	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];

	if (!ibup) {
		u64 clr;

		/* Link went down. */
		/* do IPG MAD again after linkdown, even if last time failed */
		ppd->cpspec->ipg_tries = 0;
		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
		if (clr)
			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			struct qib_qsfp_data *qd =
				&ppd->cpspec->qsfp_data;

			/* unlock the Tx settings, speed may change */
			qib_write_kreg_port(ppd, krp_tx_deemph_override,
				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				reset_tx_deemphasis_override));
			qib_cancel_sends(ppd);
			/* on link down, ensure sane pcs state */
			qib_7322_mini_pcs_reset(ppd);
			/* schedule the qsfp refresh which should turn the link
			   off */
			if (ppd->dd->flags & QIB_HAS_QSFP) {
				qd->t_insert = jiffies;
				queue_work(ib_wq, &qd->work);
			}
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
		if (clr == ppd->cpspec->iblnkdownsnap)
			ppd->cpspec->iblnkdowndelta++;
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & QIB_IB_DDR)
		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and auto-negotiation enabled */
			++ppd->cpspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymdelta +=
					read_7322_creg32_port(ppd,
						crp_ibsymbolerr) -
						ppd->cpspec->ibsymsnap;
				ppd->cpspec->iblnkerrdelta +=
					read_7322_creg32_port(ppd,
						crp_iblinkerrrecov) -
						ppd->cpspec->iblnkerrsnap;
			}
			try_7322_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			qib_autoneg_7322_send(ppd, 1);
			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
			qib_7322_mini_pcs_reset(ppd);
			udelay(2);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   (ppd->link_speed_active & QIB_IB_DDR)) {
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
					 QIBL_IB_AUTONEG_FAILED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->autoneg_tries = 0;
			/* re-enable SDR, for next link down */
			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
			wake_up(&ppd->cpspec->autoneg_wait);
			symadj = 1;
		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
			/*
			 * Clear autoneg failure flag, and do setup
			 * so we'll try next time link goes down and
			 * back to INIT (possibly connected to a
			 * different device).
			 */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
			symadj = 1;
		}
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			symadj = 1;
			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
				try_7322_ipg(ppd);
			if (!ppd->cpspec->recovery_init)
				setup_7322_link_recovery(ppd, 0);
			ppd->cpspec->qdr_dfe_time = jiffies +
				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
		}
		ppd->cpspec->ibmalfusesnap = 0;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
								crp_errlink);
	}
	if (symadj) {
		ppd->cpspec->iblnkdownsnap =
			read_7322_creg32_port(ppd, crp_iblinkdown);
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
							       crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
							 crp_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7322_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
 */
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}

/* Enable writes to config EEPROM, if possible. Returns previous state */
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
{
	int prev_wen;
	u32 mask;

	mask = 1 << QIB_EEPROM_WEN_NUM;
	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);

	return prev_wen & 1;
}

/*
 * Read fundamental info we need to use the chip. These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buffer allocation.
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;

	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
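
/*
 * Worked example for the pioavregs math above: each pioavail register
 * carries 2 bits per send buffer, so one 64-bit register covers 32
 * buffers. A hypothetical total of 190 buffers (2k + 4k + VL15) thus
 * rounds up to ALIGN(190, 32) / 32 = 6 registers.
 */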

/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}

/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) |	\
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed. We need to hunt for the ppd that corresponds
	 * to the offset we got. And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}
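
/*
 * Illustrative sketch (hypothetical inputs, not part of the driver) of
 * the shadow/transient split in sendctrl_hook(): only SENDCTRL_SHADOWED
 * bits persist in ppd->p_sendctrl, while the remaining requested bits
 * are written once to the register and not remembered.
 */
#if 0
static u64 example_sendctrl_split(u64 shadow, u64 req, u64 mask)
{
	u64 sval = (shadow & ~mask) | (req & SENDCTRL_SHADOWED & mask);

	/* sval would become the new shadow; the register gets both parts */
	return sval | (req & ~SENDCTRL_SHADOWED & mask);
}
#endif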

static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
};

static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
};

static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");

/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);

static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	unsigned long pwrup;
	unsigned long flags;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	pwrup = qd->t_insert +
		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);

	/* Delay for 20 msecs to allow ModPrs resistor to setup */
	mdelay(QSFP_MODPRS_LAG_MSEC);

	if (!qib_qsfp_mod_present(ppd)) {
		ppd->cpspec->qsfp_data.modpresent = 0;
		/* Set the physical link to disabled */
		qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else {
		/*
		 * Some QSFPs not only do not respond until the full power-up
		 * time, but may behave badly if we try. So hold off
		 * responding to insertion.
		 */
		while (1) {
			if (time_is_before_jiffies(pwrup))
				break;
			msleep(20);
		}

		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);

		/*
		 * Need to change LE2 back to defaults if we couldn't
		 * read the cable type (to handle cable swaps), so do this
		 * even on failure to read cable information. We don't
		 * get here for QME, so IS_QME check not needed here.
		 */
		if (!ret && !ppd->dd->cspec->r1) {
			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
				le2 = LE2_QME;
			else if (qd->cache.atten[1] >= qib_long_atten &&
				 QSFP_IS_CU(qd->cache.tech))
				le2 = LE2_5m;
			else
				le2 = LE2_DEFAULT;
		} else
			le2 = LE2_DEFAULT;
		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
		/*
		 * We always change parameters, since we can choose
		 * values for cables without eeproms, and the cable may have
		 * changed from a cable with full or partial eeprom content
		 * to one with partial or no content.
		 */
		init_txdds_table(ppd, 0);
		/* The physical link is being re-enabled only when the
		 * previous state was DISABLED and the VALID bit is not
		 * set. This should only happen when the cable has been
		 * physically pulled. */
		if (!ppd->cpspec->qsfp_data.modpresent &&
		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
			ppd->cpspec->qsfp_data.modpresent = 1;
			qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKV;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		}
	}
}

/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	qib_qsfp_init(qd, qsfp_7322_event);
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}

/*
 * called at device initialization time, and also if the txselect
 * module parameter is changed. This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character. A newline terminates. The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	while (*nxt && nxt[1]) {
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitted */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port || !ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set.
			 * QSFP boards are handled by the QSFP event handler */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}

/* handle the txselect parameter changing */
static int setup_txselect(const char *str, struct kernel_param *kp)
{
	struct qib_devdata *dd;
	unsigned long val;
	char *n;

	if (strlen(str) >= MAX_ATTEN_LEN) {
		pr_info("txselect_values string too long\n");
		return -ENOSPC;
	}
	val = simple_strtoul(str, &n, 0);
	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
				TXDDS_MFG_SZ)) {
		pr_info("txselect_values must start with a number < %d\n",
			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
		return -EINVAL;
	}
	strcpy(txselect_list, str);

	list_for_each_entry(dd, &qib_dev_list, list)
		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
			set_no_qsfp_atten(dd, 1);
	return 0;
}

/*
 * Write the final few registers that depend on some of the
 * init setup. Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}

	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);
	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}

/* per IB port errors. */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
		       MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
		   MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | \
		   MASK_ACROSS(14, 17) | MASK_ACROSS(0, 11))

/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	qib_write_kreg_port(ppd, krp_ncmodectrl,
			    SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits. If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}

/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c). Also write per-port
 * registers that are affected by overall device config, such as QP mapping.
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		if (dd->n_krcv_queues < 2 ||
		    !dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		qib_write_kreg_port(ppd, regno, val);
	}

	/*
	 * Set up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
	 */
	for (i = 0; i < dd->first_user_ctxt; i++) {
		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
	}

	/*
	 * Initialize as (disabled) rcvflow tables. Application code
	 * will set up each flow as it uses the flow.
	 * Doesn't clear any of the error bits that might be set.
	 */
	val = TIDFLOW_ERRBITS; /* these are W1C */
	for (i = 0; i < dd->cfgctxts; i++) {
		int flow;

		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
	}

	/*
	 * Dual cards init to dual port recovery, single port cards to
	 * the one port. Dual port cards may later adjust to 1 port,
	 * and then back to dual port if both ports are connected.
	 */
	if (dd->num_pports)
		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
}
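
/*
 * Sketch of the QP-to-context packing in write_7322_initregs() above,
 * with hypothetical sizes: for num_pports == 2 and first_user_ctxt == 6,
 * n == 3, so QP index i maps to context (i % 3) * 2 + pidx, and six
 * 5-bit entries are packed into each krp_rcvqpmaptable register before
 * advancing to the next one.
 */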
static int qib_init_7322_variables(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned features, pidx, sbufcnt;
	int ret, mtu;
	u32 sbufs, updthresh;
	resource_size_t vl15off;

	/* pport structs are contiguous, allocated after devdata */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	ppd[0].dd = dd;
	ppd[1].dd = dd;

	dd->cspec = (struct qib_chip_specific *)(ppd + 2);

	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
	ppd[1].cpspec = &ppd[0].cpspec[1];
	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */

	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;  /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
	dd->cspec->r1 = dd->minrev == 1;

	get_7322_chip_params(dd);
	features = qib_7322_boardname(dd);

	/* now that piobcnt2k and 4k set, we can allocate these */
	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
		NUM_VL15_BUFS + BITS_PER_LONG - 1;
	sbufcnt /= BITS_PER_LONG;
	dd->cspec->sendchkenable = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
	dd->cspec->sendibchk = kmalloc(sbufcnt *
		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
	    !dd->cspec->sendibchk) {
		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
		ret = -ENOMEM;
		goto bail;
	}

	ppd = dd->pport;

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
		QIB_HAS_THRESH_UPDATE |
		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * Setup initial values. These may change when PAT is enabled, but
	 * we need these to do initial chip register accesses.
	 */
	qib_7322_set_baseaddrs(dd);

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
	/* all hwerrors become interrupts, unless special purposed */
	dd->cspec->hwerrmask = ~0ULL;
	/* link_recovery setup causes these errors, so ignore them,
	 * other than clearing them when they occur */
	dd->cspec->hwerrmask &=
		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
		  HWE_MASK(LATriggered));

	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
		struct qib_chippport_specific *cp = ppd->cpspec;

		ppd->link_speed_supported = features & PORT_SPD_CAP;
		features >>= PORT_SPD_CAP_SHIFT;
		if (!ppd->link_speed_supported) {
			/* single port mode (7340, or configured) */
			dd->skip_kctxt_mask |= 1 << pidx;
			if (pidx == 0) {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				ppd[0] = ppd[1];
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_0)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_0));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
				     SYM_MASK(IntMask, SDmaIntMask_0) |
				     SYM_MASK(IntMask, ErrIntMask_0) |
				     SYM_MASK(IntMask, SendDoneIntMask_0));
			} else {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_1)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_1));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
				     SYM_MASK(IntMask, SDmaIntMask_1) |
				     SYM_MASK(IntMask, ErrIntMask_1) |
				     SYM_MASK(IntMask, SendDoneIntMask_1));
			}
			continue;
		}

		dd->num_pports++;
		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
		if (ret) {
			dd->num_pports--;
			goto bail;
		}

		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
		ppd->link_width_enabled = IB_WIDTH_4X;
		ppd->link_speed_enabled = ppd->link_speed_supported;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->link_width_active = IB_WIDTH_4X;
		ppd->link_speed_active = QIB_IB_SDR;
		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
		switch (qib_num_cfg_vls) {
		case 1:
			ppd->vls_supported = IB_VL_VL0;
			break;
		case 2:
			ppd->vls_supported = IB_VL_VL0_1;
			break;
		default:
			qib_devinfo(dd->pcidev,
				    "Invalid num_vls %u, using 4 VLs\n",
				    qib_num_cfg_vls);
			qib_num_cfg_vls = 4;
			/* fall through */
		case 4:
			ppd->vls_supported = IB_VL_VL0_3;
			break;
		case 8:
			if (mtu <= 2048)
				ppd->vls_supported = IB_VL_VL0_7;
			else {
				qib_devinfo(dd->pcidev,
					    "Invalid num_vls %u for MTU %d, using 4 VLs\n",
					    qib_num_cfg_vls, mtu);
				ppd->vls_supported = IB_VL_VL0_3;
				qib_num_cfg_vls = 4;
			}
			break;
		}
		ppd->vls_operational = ppd->vls_supported;

		init_waitqueue_head(&cp->autoneg_wait);
		INIT_DELAYED_WORK(&cp->autoneg_work,
				  autoneg_7322_work);
		if (ppd->dd->cspec->r1)
			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);

		/*
		 * For Mez and similar cards, no qsfp info, so do
		 * the "cable info" setup here. Can be overridden
		 * in adapter-specific routines.
		 */
		if (!(dd->flags & QIB_HAS_QSFP)) {
			if (!IS_QMH(dd) && !IS_QME(dd))
				qib_devinfo(dd->pcidev,
					"IB%u:%u: Unknown mezzanine card type\n",
					dd->unit, ppd->port);
			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
			/*
			 * Choose center value as default tx serdes setting
			 * until changed through module parameter.
			 */
			ppd->cpspec->no_eep = IS_QMH(dd) ?
				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
		} else
			cp->h1_val = H1_FORCE_VAL;

		/* Avoid writes to chip for mini_init */
		if (!qib_mini_init)
			write_7322_init_portregs(ppd);

		init_timer(&cp->chase_timer);
		cp->chase_timer.function = reenable_chase;
		cp->chase_timer.data = (unsigned long)ppd;

		ppd++;
	}

	dd->rcvhdrentsize = qib_rcvhdrentsize ?
		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = qib_rcvhdrsize ?
		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	dd->rcvegrbufsize = max(mtu, 2048);
	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7322_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */
	dd->rhdrhead_intr_off =
		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7322_faststats;
	dd->stats_timer.data = (unsigned long) dd;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;

	qib_7322_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	/*
	 * We do not set WC on the VL15 buffers to avoid
	 * a rare problem with unaligned writes from
	 * interrupt-flushed store buffers, so we need
	 * to map those separately here. We can't solve
	 * this for the rarely used mtrr case.
	 */
	ret = init_chip_wc_pat(dd, 0);
	if (ret)
		goto bail;

	/* vl15 buffers start just after the 4k buffers */
	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
		dd->piobcnt4k * dd->align4k;
	dd->piovl15base = ioremap_nocache(vl15off,
					  NUM_VL15_BUFS * dd->align4k);
	if (!dd->piovl15base) {
		ret = -ENOMEM;
		goto bail;
	}

	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;
	if (!dd->num_pports) {
		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so can still figure out why err */
	}

	write_7322_initregs(dd);
	ret = qib_create_ctxts(dd);
	init_7322_cntrnames(dd);

	updthresh = 8U; /* update threshold */

	/*
	 * Use all of the 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * Reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater;
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * If this was less than the update threshold, we could wait
	 * a long time for an update. Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}
	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
		dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt) : 0;

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so reduce the update threshold to match. We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin.
	 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}
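
/*
 * Worked example (hypothetical counts) of the send-buffer carve-up
 * above: with piobcnt2k = 128, piobcnt4k = 32, SDMA enabled and
 * updthresh = 8, sdmabufcnt = 32, so lastbuf_for_pio = 127 after the
 * decrement (buffers 0..127 remain for PIO) and lastctxt_piobuf =
 * 128 - 8 = 120 caps what user contexts may use.
 */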
  6042. static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
  6043. u32 *pbufnum)
  6044. {
  6045. u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
  6046. struct qib_devdata *dd = ppd->dd;
  6047. /* last is same for 2k and 4k, because we use 4k if all 2k busy */
  6048. if (pbc & PBC_7322_VL15_SEND) {
  6049. first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
  6050. last = first;
  6051. } else {
  6052. if ((plen + 1) > dd->piosize2kmax_dwords)
  6053. first = dd->piobcnt2k;
  6054. else
  6055. first = 0;
  6056. last = dd->cspec->lastbuf_for_pio;
  6057. }
  6058. return qib_getsendbuf_range(dd, pbufnum, first, last);
  6059. }
  6060. static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
  6061. u32 start)
  6062. {
  6063. qib_write_kreg_port(ppd, krp_psinterval, intv);
  6064. qib_write_kreg_port(ppd, krp_psstart, start);
  6065. }

/*
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
        qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}

/*
 * sdma_lock should be acquired before calling this routine
 */
static void dump_sdma_7322_state(struct qib_pportdata *ppd)
{
        u64 reg, reg1, reg2;

        reg = qib_read_kreg_port(ppd, krp_senddmastatus);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmastatus: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_sendctrl);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA sendctrl: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmabase);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmabase: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
        reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
        reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
                reg, reg1, reg2);

        /* get bufuse bits, clear them, and print them again if non-zero */
        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
        qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
        qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
        qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
        /* 0 and 1 should always be zero, so print as short form */
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
                reg, reg1, reg2);
        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
        /* 0 and 1 should always be zero, so print as short form */
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
                reg, reg1, reg2);

        reg = qib_read_kreg_port(ppd, krp_senddmatail);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmatail: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmahead);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmahead: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmaheadaddr: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmalengen);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmalengen: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmadesccnt: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmaidlecnt: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmaprioritythld: 0x%016llx\n", reg);

        reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmareloadcnt: 0x%016llx\n", reg);

        dump_sdma_state(ppd);
}
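
/*
 * Per-state control-op settings consumed by the generic qib SDMA state
 * machine: for each target state, whether the engine and its interrupt
 * are enabled, and whether halt/drain are asserted while getting there.
 */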
static struct sdma_set_state_action sdma_7322_action_table[] = {
        [qib_sdma_state_s00_hw_down] = {
                .go_s99_running_tofalse = 1,
                .op_enable = 0,
                .op_intenable = 0,
                .op_halt = 0,
                .op_drain = 0,
        },
        [qib_sdma_state_s10_hw_start_up_wait] = {
                .op_enable = 0,
                .op_intenable = 1,
                .op_halt = 1,
                .op_drain = 0,
        },
        [qib_sdma_state_s20_idle] = {
                .op_enable = 1,
                .op_intenable = 1,
                .op_halt = 1,
                .op_drain = 0,
        },
        [qib_sdma_state_s30_sw_clean_up_wait] = {
                .op_enable = 0,
                .op_intenable = 1,
                .op_halt = 1,
                .op_drain = 0,
        },
        [qib_sdma_state_s40_hw_clean_up_wait] = {
                .op_enable = 1,
                .op_intenable = 1,
                .op_halt = 1,
                .op_drain = 0,
        },
        [qib_sdma_state_s50_hw_halt_wait] = {
                .op_enable = 1,
                .op_intenable = 1,
                .op_halt = 1,
                .op_drain = 1,
        },
        [qib_sdma_state_s99_running] = {
                .op_enable = 1,
                .op_intenable = 1,
                .op_halt = 0,
                .op_drain = 0,
                .go_s99_running_totrue = 1,
        },
};

static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
        ppd->sdma_state.set_state_action = sdma_7322_action_table;
}

static int init_sdma_7322_regs(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        unsigned lastbuf, erstbuf;
        u64 senddmabufmask[3] = { 0 };
        int n, ret = 0;

        qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
        qib_sdma_7322_setlengen(ppd);
        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
        qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
        qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
        qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);

        if (dd->num_pports)
                n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
        else
                n = dd->cspec->sdmabufcnt; /* failsafe for init */
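
        /*
         * The SDMA buffers sit at the top of the PIO buffer space: port 1
         * of a dual-port chip takes the lower half of that region and
         * port 2 (or the only port) the upper half, so erstbuf below is
         * the first send buffer this port may hand to the DMA engine.
         */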
        erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
                ((dd->num_pports == 1 || ppd->port == 2) ? n :
                 dd->cspec->sdmabufcnt);
        lastbuf = erstbuf + n;

        ppd->sdma_state.first_sendbuf = erstbuf;
        ppd->sdma_state.last_sendbuf = lastbuf;
        for (; erstbuf < lastbuf; ++erstbuf) {
                unsigned word = erstbuf / BITS_PER_LONG;
                unsigned bit = erstbuf & (BITS_PER_LONG - 1);

                BUG_ON(word >= 3);
                senddmabufmask[word] |= 1ULL << bit;
        }
        qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
        qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
        qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
        return ret;
}

/* sdma_lock must be held */
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        int sane;
        int use_dmahead;
        u16 swhead;
        u16 swtail;
        u16 cnt;
        u16 hwhead;

        use_dmahead = __qib_sdma_running(ppd) &&
                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
        hwhead = use_dmahead ?
                (u16) le64_to_cpu(*ppd->sdma_head_dma) :
                (u16) qib_read_kreg_port(ppd, krp_senddmahead);

        swhead = ppd->sdma_descq_head;
        swtail = ppd->sdma_descq_tail;
        cnt = ppd->sdma_descq_cnt;
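
        /*
         * Sanity-check the hardware head against the software view of
         * the ring. For example, with cnt = 256, swhead = 250 and
         * swtail = 10 (a wrapped ring), any hwhead in 250..255 or 0..10
         * is plausible; anything else means the value read was garbage.
         */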
        if (swhead < swtail)
                /* not wrapped */
                sane = (hwhead >= swhead) && (hwhead <= swtail);
        else if (swhead > swtail)
                /* wrapped around */
                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
                        (hwhead <= swtail);
        else
                /* empty */
                sane = (hwhead == swhead);

        if (unlikely(!sane)) {
                if (use_dmahead) {
                        /* try one more time, directly from the register */
                        use_dmahead = 0;
                        goto retry;
                }
                /* proceed as if no progress */
                hwhead = swhead;
        }

        return hwhead;
}

static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
{
        u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);

        return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
               (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
               !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
               !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
 */
static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
                                   u8 srate, u8 vl)
{
        u8 snd_mult = ppd->delay_mult;
        u8 rcv_mult = ib_rate_to_delay[srate];
        u32 ret;
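
        /*
         * If the far end's static rate needs a larger delay multiplier
         * than this port's, pad the PBC so the next packet waits out
         * roughly the time this packet occupies at the slower rate:
         * half the dword length, scaled by the send multiplier.
         */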
        ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;

        /* Indicate VL15, else set the VL in the control word */
        if (vl == 15)
                ret |= PBC_7322_VL15_SEND_CTRL;
        else
                ret |= vl << PBC_VL_NUM_LSB;

        ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;

        return ret;
}

/*
 * Enable the per-port VL15 send buffers for use.
 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but that is done before the shadow
 * is set up, and this has to be done after the shadow is
 * set up.
 */
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
{
        unsigned vl15bufs;

        vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
        qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
                               TXCHK_CHG_TYPE_KERN, NULL);
}

static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
{
        if (rcd->ctxt < NUM_IB_PORTS) {
                if (rcd->dd->num_pports > 1) {
                        rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
                        rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
                } else {
                        rcd->rcvegrcnt = KCTXT0_EGRCNT;
                        rcd->rcvegr_tid_base = 0;
                }
        } else {
                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
                rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
                        (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
        }
}

#define QTXSLEEPS 5000
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
        int i;
        const int last = start + len - 1;
        const int lastr = last / BITS_PER_LONG;
        u32 sleeps = 0;
        int wait = rcd != NULL;
        unsigned long flags;
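
        /*
         * Each send buffer owns two bits in the DMA'd pioavail array;
         * the loop below peeks at the BUSY bit of every buffer in the
         * range (bit 2 * bufnum + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
         * and keeps waiting until the whole range is idle or we give up.
         */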
        while (wait) {
                unsigned long shadow;
                int cstart, previ = -1;

                /*
                 * when flipping from kernel to user, we can't change
                 * the checking type if the buffer is allocated to the
                 * driver. It's OK the other direction, because it's
                 * from close, and we have just disarmed all the
                 * buffers. All the kernel to kernel changes are also
                 * OK.
                 */
                for (cstart = start; cstart <= last; cstart++) {
                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
                            / BITS_PER_LONG;
                        if (i != previ) {
                                shadow = (unsigned long)
                                        le64_to_cpu(dd->pioavailregs_dma[i]);
                                previ = i;
                        }
                        if (test_bit(((2 * cstart) +
                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
                                     % BITS_PER_LONG, &shadow))
                                break;
                }

                if (cstart > last)
                        break;

                if (sleeps == QTXSLEEPS)
                        break;
                /* make sure we see an updated copy next time around */
                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                sleeps++;
                msleep(20);
        }

        switch (which) {
        case TXCHK_CHG_TYPE_DIS1:
                /*
                 * disable checking on a range; used by diags; just
                 * one buffer, but still written generically
                 */
                for (i = start; i <= last; i++)
                        clear_bit(i, dd->cspec->sendchkenable);
                break;

        case TXCHK_CHG_TYPE_ENAB1:
                /*
                 * (re)enable checking on a range; used by diags; just
                 * one buffer, but still written generically; read
                 * scratch to be sure buffer actually triggered, not
                 * just flushed from processor.
                 */
                qib_read_kreg32(dd, kr_scratch);
                for (i = start; i <= last; i++)
                        set_bit(i, dd->cspec->sendchkenable);
                break;

        case TXCHK_CHG_TYPE_KERN:
                /* usable by kernel */
                for (i = start; i <= last; i++) {
                        set_bit(i, dd->cspec->sendibchk);
                        clear_bit(i, dd->cspec->sendgrhchk);
                }
                spin_lock_irqsave(&dd->uctxt_lock, flags);
                /* see if we need to raise avail update threshold */
                for (i = dd->first_user_ctxt;
                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
                     && i < dd->cfgctxts; i++)
                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
                           < dd->cspec->updthresh_dflt)
                                break;
                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                if (i == dd->cfgctxts) {
                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
                        dd->sendctrl |= (dd->cspec->updthresh &
                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
                                        SYM_LSB(SendCtrl, AvailUpdThld);
                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                }
                break;

        case TXCHK_CHG_TYPE_USER:
                /* for user process */
                for (i = start; i <= last; i++) {
                        clear_bit(i, dd->cspec->sendibchk);
                        set_bit(i, dd->cspec->sendgrhchk);
                }
                spin_lock_irqsave(&dd->sendctrl_lock, flags);
                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
                    / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
                        dd->cspec->updthresh = (rcd->piocnt /
                                                rcd->subctxt_cnt) - 1;
                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
                        dd->sendctrl |= (dd->cspec->updthresh &
                                         SYM_RMASK(SendCtrl, AvailUpdThld))
                                        << SYM_LSB(SendCtrl, AvailUpdThld);
                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                } else
                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                break;

        default:
                break;
        }
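
        /*
         * Push the shadow masks to the chip: the diag cases above only
         * touch sendchkenable, while the kernel/user cases rewrite the
         * GRH and IB-packet check masks, which is why the two write-back
         * loops below are gated on 'which'.
         */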
        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
                qib_write_kreg(dd, kr_sendcheckmask + i,
                               dd->cspec->sendchkenable[i]);

        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
                               dd->cspec->sendgrhchk[i]);
                qib_write_kreg(dd, kr_sendibpktmask + i,
                               dd->cspec->sendibchk[i]);
        }

        /*
         * Be sure whatever we did was seen by the chip and acted upon,
         * before we return. Mostly important for which >= 2.
         */
        qib_read_kreg32(dd, kr_scratch);
}

/* useful for trigger analyzers, etc. */
static void writescratch(struct qib_devdata *dd, u32 val)
{
        qib_write_kreg(dd, kr_scratch, val);
}

/* Dummy for now, use chip regs soon */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
        return -ENXIO;
}

/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for this qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, inits, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
                                           const struct pci_device_id *ent)
{
        struct qib_devdata *dd;
        int ret, i;
        u32 tabsize, actual_cnt = 0;

        dd = qib_alloc_devdata(pdev,
                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
                sizeof(struct qib_chip_specific) +
                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
        if (IS_ERR(dd))
                goto bail;

        dd->f_bringup_serdes = qib_7322_bringup_serdes;
        dd->f_cleanup = qib_setup_7322_cleanup;
        dd->f_clear_tids = qib_7322_clear_tids;
        dd->f_free_irq = qib_7322_free_irq;
        dd->f_get_base_info = qib_7322_get_base_info;
        dd->f_get_msgheader = qib_7322_get_msgheader;
        dd->f_getsendbuf = qib_7322_getsendbuf;
        dd->f_gpio_mod = gpio_7322_mod;
        dd->f_eeprom_wen = qib_7322_eeprom_wen;
        dd->f_hdrqempty = qib_7322_hdrqempty;
        dd->f_ib_updown = qib_7322_ib_updown;
        dd->f_init_ctxt = qib_7322_init_ctxt;
        dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
        dd->f_intr_fallback = qib_7322_intr_fallback;
        dd->f_late_initreg = qib_late_7322_initreg;
        dd->f_setpbc_control = qib_7322_setpbc_control;
        dd->f_portcntr = qib_portcntr_7322;
        dd->f_put_tid = qib_7322_put_tid;
        dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
        dd->f_rcvctrl = rcvctrl_7322_mod;
        dd->f_read_cntrs = qib_read_7322cntrs;
        dd->f_read_portcntrs = qib_read_7322portcntrs;
        dd->f_reset = qib_do_7322_reset;
        dd->f_init_sdma_regs = init_sdma_7322_regs;
        dd->f_sdma_busy = qib_sdma_7322_busy;
        dd->f_sdma_gethead = qib_sdma_7322_gethead;
        dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
        dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
        dd->f_sendctrl = sendctrl_7322_mod;
        dd->f_set_armlaunch = qib_set_7322_armlaunch;
        dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
        dd->f_iblink_state = qib_7322_iblink_state;
        dd->f_ibphys_portstate = qib_7322_phys_portstate;
        dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
        dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
        dd->f_set_ib_loopback = qib_7322_set_loopback;
        dd->f_get_ib_table = qib_7322_get_ib_table;
        dd->f_set_ib_table = qib_7322_set_ib_table;
        dd->f_set_intr_state = qib_7322_set_intr_state;
        dd->f_setextled = qib_setup_7322_setextled;
        dd->f_txchk_change = qib_7322_txchk_change;
        dd->f_update_usrhead = qib_update_7322_usrhead;
        dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
        dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
        dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
        dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
        dd->f_sdma_init_early = qib_7322_sdma_init_early;
        dd->f_writescratch = writescratch;
        dd->f_tempsense_rd = qib_7322_tempsense_rd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
        dd->f_notify_dca = qib_7322_notify_dca;
#endif
        /*
         * Do remaining PCIe setup and save PCIe values in dd.
         * Any error printing is already done by the init code.
         * On return, we have the chip mapped, but chip registers
         * are not set up until start of qib_init_7322_variables.
         */
        ret = qib_pcie_ddinit(dd, pdev, ent);
        if (ret < 0)
                goto bail_free;

        /* initialize chip-specific variables */
        ret = qib_init_7322_variables(dd);
        if (ret)
                goto bail_cleanup;

        if (qib_mini_init || !dd->num_pports)
                goto bail;

        /*
         * Determine number of vectors we want; depends on port count
         * and number of configured kernel receive queues actually used.
         * Should also depend on whether sdma is enabled or not, but
         * that's such a rare testing case it's not worth worrying about.
         */
        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
        for (i = 0; i < tabsize; i++)
                if ((i < ARRAY_SIZE(irq_table) &&
                     irq_table[i].port <= dd->num_pports) ||
                    (i >= ARRAY_SIZE(irq_table) &&
                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
                        actual_cnt++;
        /* reduce by the ctxts < 2 (one per port), which don't use MSI-X */
        if (qib_krcvq01_no_msi)
                actual_cnt -= dd->num_pports;

        tabsize = actual_cnt;
        dd->cspec->msix_entries = kzalloc(tabsize *
                        sizeof(struct qib_msix_entry), GFP_KERNEL);
        if (!dd->cspec->msix_entries) {
                qib_dev_err(dd, "No memory for MSIx table\n");
                tabsize = 0;
        }
        for (i = 0; i < tabsize; i++)
                dd->cspec->msix_entries[i].msix.entry = i;

        if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
                qib_dev_err(dd,
                        "Failed to setup PCIe or interrupts; continuing anyway\n");
        /* may be less than we wanted, if not enough available */
        dd->cspec->num_msix_entries = tabsize;

        /* setup interrupt handler */
        qib_setup_7322_interrupt(dd, 1);

        /* clear diagctrl register, in case diags were running and crashed */
        qib_write_kreg(dd, kr_hwdiagctrl, 0);

#ifdef CONFIG_INFINIBAND_QIB_DCA
        if (!dca_add_requester(&pdev->dev)) {
                qib_devinfo(dd->pcidev, "DCA enabled\n");
                dd->flags |= QIB_DCA_ENABLED;
                qib_setup_dca(dd);
        }
#endif

        goto bail;

bail_cleanup:
        qib_pcie_ddcleanup(dd);
bail_free:
        qib_free_devdata(dd);
        dd = ERR_PTR(ret);
bail:
        return dd;
}

/*
 * Set the table entry at the specified index from the table specified.
 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * 'idx' below addresses the correct entry, while its 4 LSBs select the
 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
 */
#define DDS_ENT_AMP_LSB 14
#define DDS_ENT_MAIN_LSB 9
#define DDS_ENT_POST_LSB 5
#define DDS_ENT_PRE_XTRA_LSB 3
#define DDS_ENT_PRE_LSB 0
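
/*
 * Packed entry layout implied by the LSB defines above (field widths
 * inferred from the adjacent LSBs):
 *
 *   [..:14] amp   [13:9] main   [8:5] post   [4:3] pre_xtra   [2:0] pre
 */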

/*
 * Set one entry in the TxDDS table for spec'd port
 * ridx picks one of the entries, while tp points
 * to the appropriate table entry.
 */
static void set_txdds(struct qib_pportdata *ppd, int ridx,
                      const struct txdds_ent *tp)
{
        struct qib_devdata *dd = ppd->dd;
        u32 pack_ent;
        int regidx;

        /* Get correct offset in chip-space, and in source table */
        regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
        /*
         * We do not use qib_write_kreg_port() because it was intended
         * only for registers in the lower "port specific" pages.
         * So do index calculation by hand.
         */
        if (ppd->hw_pidx)
                regidx += (dd->palign / sizeof(u64));

        pack_ent = tp->amp << DDS_ENT_AMP_LSB;
        pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
        pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
        pack_ent |= tp->post << DDS_ENT_POST_LSB;
        qib_write_kreg(dd, regidx, pack_ent);
        /* Prevent back-to-back writes by hitting scratch */
        qib_write_kreg(ppd->dd, kr_scratch, 0);
}

static const struct vendor_txdds_ent vendor_txdds[] = {
        { /* Amphenol 1m 30awg NoEq */
                { 0x41, 0x50, 0x48 }, "584470002       ",
                { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
        },
        { /* Amphenol 3m 28awg NoEq */
                { 0x41, 0x50, 0x48 }, "584470004       ",
                { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
        },
        { /* Finisar 3m OM2 Optical */
                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
                { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
        },
        { /* Finisar 30m OM2 Optical */
                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
                { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
        },
        { /* Finisar Default OM2 Optical */
                { 0x00, 0x90, 0x65 }, NULL,
                { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
        },
        { /* Gore 1m 30awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
                { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
        },
        { /* Gore 2m 30awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
                { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
        },
        { /* Gore 1m 28awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
                { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
        },
        { /* Gore 3m 28awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
                { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
        },
        { /* Gore 5m 24awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
                { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
        },
        { /* Gore 7m 24awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
                { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
        },
        { /* Gore 5m 26awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
                { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
        },
        { /* Gore 7m 26awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
                { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
        },
        { /* Intersil 12m 24awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
                { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
        },
        { /* Intersil 10m 28awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
                { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
        },
        { /* Intersil 7m 30awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
                { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
        },
        { /* Intersil 5m 32awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
                { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
        },
        { /* Intersil Default Active */
                { 0x00, 0x30, 0xB4 }, NULL,
                { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
        },
        { /* Luxtera 20m Active Optical */
                { 0x00, 0x25, 0x63 }, NULL,
                { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
        },
        { /* Molex 1M Cu loopback */
                { 0x00, 0x09, 0x3A }, "74763-0025      ",
                { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
        },
        { /* Molex 2m 28awg NoEq */
                { 0x00, 0x09, 0x3A }, "74757-2201      ",
                { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
        },
};

static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
        /* amp, pre, main, post */
        { 2, 2, 15, 6 },        /* Loopback */
        { 0, 0, 0, 1 },         /*  2 dB */
        { 0, 0, 0, 2 },         /*  3 dB */
        { 0, 0, 0, 3 },         /*  4 dB */
        { 0, 0, 0, 4 },         /*  5 dB */
        { 0, 0, 0, 5 },         /*  6 dB */
        { 0, 0, 0, 6 },         /*  7 dB */
        { 0, 0, 0, 7 },         /*  8 dB */
        { 0, 0, 0, 8 },         /*  9 dB */
        { 0, 0, 0, 9 },         /* 10 dB */
        { 0, 0, 0, 10 },        /* 11 dB */
        { 0, 0, 0, 11 },        /* 12 dB */
        { 0, 0, 0, 12 },        /* 13 dB */
        { 0, 0, 0, 13 },        /* 14 dB */
        { 0, 0, 0, 14 },        /* 15 dB */
        { 0, 0, 0, 15 },        /* 16 dB */
};

static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
        /* amp, pre, main, post */
        { 2, 2, 15, 6 },        /* Loopback */
        { 0, 0, 0, 8 },         /*  2 dB */
        { 0, 0, 0, 8 },         /*  3 dB */
        { 0, 0, 0, 9 },         /*  4 dB */
        { 0, 0, 0, 9 },         /*  5 dB */
        { 0, 0, 0, 10 },        /*  6 dB */
        { 0, 0, 0, 10 },        /*  7 dB */
        { 0, 0, 0, 11 },        /*  8 dB */
        { 0, 0, 0, 11 },        /*  9 dB */
        { 0, 0, 0, 12 },        /* 10 dB */
        { 0, 0, 0, 12 },        /* 11 dB */
        { 0, 0, 0, 13 },        /* 12 dB */
        { 0, 0, 0, 13 },        /* 13 dB */
        { 0, 0, 0, 14 },        /* 14 dB */
        { 0, 0, 0, 14 },        /* 15 dB */
        { 0, 0, 0, 15 },        /* 16 dB */
};

static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
        /* amp, pre, main, post */
        { 2, 2, 15, 6 },        /* Loopback */
        { 0, 1, 0, 7 },         /*  2 dB (also QMH7342) */
        { 0, 1, 0, 9 },         /*  3 dB (also QMH7342) */
        { 0, 1, 0, 11 },        /*  4 dB */
        { 0, 1, 0, 13 },        /*  5 dB */
        { 0, 1, 0, 15 },        /*  6 dB */
        { 0, 1, 3, 15 },        /*  7 dB */
        { 0, 1, 7, 15 },        /*  8 dB */
        { 0, 1, 7, 15 },        /*  9 dB */
        { 0, 1, 8, 15 },        /* 10 dB */
        { 0, 1, 9, 15 },        /* 11 dB */
        { 0, 1, 10, 15 },       /* 12 dB */
        { 0, 2, 6, 15 },        /* 13 dB */
        { 0, 2, 7, 15 },        /* 14 dB */
        { 0, 2, 8, 15 },        /* 15 dB */
        { 0, 2, 9, 15 },        /* 16 dB */
};

/*
 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 * These are mostly used for mez cards going through connectors
 * and backplane traces, but can be used to add other "unusual"
 * table values as well.
 */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
        /* amp, pre, main, post */
        { 0, 0, 0, 1 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 1 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 2 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 2 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 3 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 4 },         /* QMH7342 backplane settings */
        { 0, 1, 4, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 3, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 12 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 11 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 9 },         /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 14 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 2, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 11 },        /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 7 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 9 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 6 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 8 },         /* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
        /* amp, pre, main, post */
        { 0, 0, 0, 7 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 7 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 8 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 8 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 9 },         /* QMH7342 backplane settings */
        { 0, 0, 0, 10 },        /* QMH7342 backplane settings */
        { 0, 1, 4, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 3, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 12 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 11 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 9 },         /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 14 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 2, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 11 },        /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 7 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 9 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 6 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 8 },         /* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
        /* amp, pre, main, post */
        { 0, 1, 0, 4 },         /* QMH7342 backplane settings */
        { 0, 1, 0, 5 },         /* QMH7342 backplane settings */
        { 0, 1, 0, 6 },         /* QMH7342 backplane settings */
        { 0, 1, 0, 8 },         /* QMH7342 backplane settings */
        { 0, 1, 0, 10 },        /* QMH7342 backplane settings */
        { 0, 1, 0, 12 },        /* QMH7342 backplane settings */
        { 0, 1, 4, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 3, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 12 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 11 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 9 },         /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 14 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 2, 15 },        /* QME7342 backplane settings 1.0 */
        { 0, 1, 0, 11 },        /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 7 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 9 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 6 },         /* QME7342 backplane settings 1.1 */
        { 0, 1, 0, 8 },         /* QME7342 backplane settings 1.1 */
};

static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
        /* amp, pre, main, post */
        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
};

static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
                                               unsigned atten)
{
        /*
         * The attenuation table starts at 2dB for entry 1,
         * with entry 0 being the loopback entry.
         */
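        /*
         * In other words, entry index = atten - 1, clamped to the valid
         * range: e.g. atten = 5 dB picks entry 4, which in txdds_sdr
         * above is the { 0, 0, 0, 4 } "5 dB" setting.
         */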
        if (atten <= 2)
                atten = 1;
        else if (atten > TXDDS_TABLE_SZ)
                atten = TXDDS_TABLE_SZ - 1;
        else
                atten--;
        return txdds + atten;
}

/*
 * if override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
 */
static void find_best_ent(struct qib_pportdata *ppd,
                          const struct txdds_ent **sdr_dds,
                          const struct txdds_ent **ddr_dds,
                          const struct txdds_ent **qdr_dds, int override)
{
        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
        int idx;

        /* Search table of known cables */
        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
                const struct vendor_txdds_ent *v = vendor_txdds + idx;

                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
                    (!v->partnum ||
                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
                        *sdr_dds = &v->sdr;
                        *ddr_dds = &v->ddr;
                        *qdr_dds = &v->qdr;
                        return;
                }
        }

        /*
         * Active cables don't have attenuation so we only set SERDES
         * settings to account for the attenuation of the board traces.
         */
        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
                return;
        }

        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
                                                      qd->atten[1])) {
                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
                return;
        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
                /*
                 * If we have no (or incomplete) data from the cable
                 * EEPROM, or no QSFP, or override is set, use the
                 * module parameter value to index into the attenuation
                 * table.
                 */
                idx = ppd->cpspec->no_eep;
                *sdr_dds = &txdds_sdr[idx];
                *ddr_dds = &txdds_ddr[idx];
                *qdr_dds = &txdds_qdr[idx];
        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
                /* similar to above, but index into the "extra" table. */
                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
                *sdr_dds = &txdds_extra_sdr[idx];
                *ddr_dds = &txdds_extra_ddr[idx];
                *qdr_dds = &txdds_extra_qdr[idx];
        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
                                          TXDDS_MFG_SZ)) {
                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
                        ppd->dd->unit, ppd->port, idx);
                *sdr_dds = &txdds_extra_mfg[idx];
                *ddr_dds = &txdds_extra_mfg[idx];
                *qdr_dds = &txdds_extra_mfg[idx];
        } else {
                /* this shouldn't happen, it's range checked */
                *sdr_dds = txdds_sdr + qib_long_atten;
                *ddr_dds = txdds_ddr + qib_long_atten;
                *qdr_dds = txdds_qdr + qib_long_atten;
        }
}

static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
        struct txdds_ent *dds;
        int idx;
        int single_ent = 0;

        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

        /* for mez cards or override, use the selected value for all entries */
        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
                single_ent = 1;

        /* Fill in the first entry with the best entry found. */
        set_txdds(ppd, 0, sdr_dds);
        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
                           QIBL_LINKACTIVE)) {
                dds = (struct txdds_ent *)(ppd->link_speed_active ==
                                           QIB_IB_QDR ? qdr_dds :
                                           (ppd->link_speed_active ==
                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
                write_tx_serdes_param(ppd, dds);
        }

        /* Fill in the remaining entries with the default table values. */
        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
                          single_ent ? ddr_dds : txdds_ddr + idx);
                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
                          single_ent ? qdr_dds : txdds_qdr + idx);
        }
}

#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
#define AHB_TRANS_TRIES 10

/*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
 * for the channel argument: it maps serdes channels 0-3 onto
 * 0, 1, 3 and 4, skipping the pll at 2.
 */
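/*
 * ahb_mod() is a read-modify-write helper: with mask == 0 it just reads
 * the addressed serdes register and returns its value; with a full mask
 * it writes 'data' outright; anything in between merges 'data' into the
 * bits covered by 'mask'. It returns the value written (0xBAD0BAD on a
 * timeout).
 */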
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
                   u32 data, u32 mask)
{
        u32 rd_data, wr_data, sz_mask;
        u64 trans, acc, prev_acc;
        u32 ret = 0xBAD0BAD;
        int tries;

        prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
        /* From this point on, make sure we return access */
        acc = (quad << 1) | 1;
        qib_write_kreg(dd, KR_AHB_ACC, acc);

        for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
                if (trans & AHB_TRANS_RDY)
                        break;
        }
        if (tries >= AHB_TRANS_TRIES) {
                qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
                goto bail;
        }

        /* If mask is not all 1s, we need to read, but different SerDes
         * entities have different sizes
         */
        sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
        wr_data = data & mask & sz_mask;
        if ((~mask & sz_mask) != 0) {
                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
                qib_write_kreg(dd, KR_AHB_TRANS, trans);

                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
                        if (trans & AHB_TRANS_RDY)
                                break;
                }
                if (tries >= AHB_TRANS_TRIES) {
                        qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
                                    AHB_TRANS_TRIES);
                        goto bail;
                }
                /* Re-read in case host split reads and read data first */
                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
                rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
                wr_data |= (rd_data & ~mask & sz_mask);
        }

        /* If mask is not zero, we need to write. */
        if (mask & sz_mask) {
                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
                trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
                trans |= AHB_WR;
                qib_write_kreg(dd, KR_AHB_TRANS, trans);

                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
                        if (trans & AHB_TRANS_RDY)
                                break;
                }
                if (tries >= AHB_TRANS_TRIES) {
                        qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
                                    AHB_TRANS_TRIES);
                        goto bail;
                }
        }
        ret = wr_data;
bail:
        qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
        return ret;
}

static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
                             unsigned mask)
{
        struct qib_devdata *dd = ppd->dd;
        int chan;
        u32 rbc;

        for (chan = 0; chan < SERDES_CHANS; ++chan) {
                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
                        data, mask);
                /* read back (result unused), presumably to flush the write */
                rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                              addr, 0, 0);
        }
}

static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
        u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);

        if (enable && !state) {
                pr_info("IB%u:%u Turning LOS on\n",
                        ppd->dd->unit, ppd->port);
                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
        } else if (!enable && state) {
                pr_info("IB%u:%u Turning LOS off\n",
                        ppd->dd->unit, ppd->port);
                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
        }
        qib_write_kreg_port(ppd, krp_serdesctrl, data);
}

static int serdes_7322_init(struct qib_pportdata *ppd)
{
        int ret = 0;

        if (ppd->dd->cspec->r1)
                ret = serdes_7322_init_old(ppd);
        else
                ret = serdes_7322_init_new(ppd);
        return ret;
}

static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
        u32 le_val;

        /*
         * Initialize the Tx DDS tables.  Also done every QSFP event,
         * for adapters with QSFP
         */
        init_txdds_table(ppd, 0);

        /* ensure no tx overrides from earlier driver loads */
        qib_write_kreg_port(ppd, krp_tx_deemph_override,
                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                reset_tx_deemphasis_override));

        /* Patch some SerDes defaults to "Better for IB" */
        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

        /* May be overridden in qsfp_7322_event */
        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

        /* enable LE1 adaptation for all but QME, which is disabled */
        le_val = IS_QME(ppd->dd) ? 0 : 1;
        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

        /* Clear cmode-override, may be set from older driver */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

        /* setup LoS params; these are subsystem, so chan == 5 */
        /* LoS filter threshold_count on, ch 0-3, set to 8 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

        /* LoS filter threshold_count off, ch 0-3, set to 4 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

        /* LoS filter select enabled */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

        /* LoS target data: SDR=4, DDR=2, QDR=1 */
        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

        serdes_7322_los_enable(ppd, 1);

        /* rxbistena; set to 0 to avoid side effects when it's switched later */
        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

        /* Configure 4 DFE taps, and only they adapt */
        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

        /*
         * Set receive adaptation mode.  SDR and DDR adaptation are
         * always on, and QDR is initially enabled; later disabled.
         */
        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                            ppd->dd->cspec->r1 ?
                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
        ppd->cpspec->qdr_dfe_on = 1;

        /* FLoop LOS gate: PPM filter enabled */
        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
        /* rx offset center enabled */
        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

        if (!ppd->dd->cspec->r1) {
                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
        }

        /* Set the frequency loop bandwidth to 15 */
        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

        return 0;
}

static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
        unsigned long tend;
        u32 le_val, rxcaldone;
        int chan, chan_done = (1 << SERDES_CHANS) - 1;

        /* Clear cmode-override, may be set from older driver */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

        /* ensure no tx overrides from earlier driver loads */
        qib_write_kreg_port(ppd, krp_tx_deemph_override,
                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                reset_tx_deemphasis_override));

        /* START OF LSI SUGGESTED SERDES BRINGUP */
        /* Reset - Calibration Setup */
        /* Stop DFE adaptation */
        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
        /* Disable LE1 */
        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
        /* Disable autoadapt for LE1 */
        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
        /* Disable LE2 */
        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
        /* Disable VGA */
        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
        /* Disable AFE Offset Cancel */
        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
        /* Disable Timing Loop */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
        /* Disable Frequency Loop */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
        /* Disable Baseline Wander Correction */
        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
        /* Disable RX Calibration */
        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
        /* Disable RX Offset Calibration */
        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
        /* Select BB CDR */
        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
        /* CDR Step Size */
        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
        /* Enable phase Calibration */
        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
        /* DFE Bandwidth [2:14-12] */
        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
        /* DFE Config (4 taps only) */
        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
        /* Gain Loop Bandwidth */
        if (!ppd->dd->cspec->r1) {
                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
        } else {
                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
        }
        /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
        /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
        /* Data Rate Select [5:7-6] (leave as default) */
        /* RX Parallel Word Width [3:10-8] (leave as default) */

        /* RX RESET */
        /* Single- or Multi-channel reset */
        /* RX Analog reset */
        /* RX Digital reset */
        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
        msleep(20);
        /* RX Analog reset */
        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
        msleep(20);
        /* RX Digital reset */
        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
        msleep(20);

        /* setup LoS params; these are subsystem, so chan == 5 */
        /* LoS filter threshold_count on, ch 0-3, set to 8 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

        /* LoS filter threshold_count off, ch 0-3, set to 4 */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

        /* LoS filter select enabled */
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

        /* LoS target data: SDR=4, DDR=2, QDR=1 */
        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

        /* Turn on LOS on initial SERDES init */
        serdes_7322_los_enable(ppd, 1);
        /* FLoop LOS gate: PPM filter enabled */
        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

        /* RX LATCH CALIBRATION */
        /* Enable Eyefinder Phase Calibration latch */
        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
        /* Enable RX Offset Calibration latch */
        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
        msleep(20);
        /* Start Calibration */
        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
        tend = jiffies + msecs_to_jiffies(500);
        while (chan_done && !time_is_before_jiffies(tend)) {
                msleep(20);
                for (chan = 0; chan < SERDES_CHANS; ++chan) {
                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
                                            (chan + (chan >> 1)),
                                            25, 0, 0);
                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
                            (~chan_done & (1 << chan)) == 0)
                                chan_done &= ~(1 << chan);
                }
        }
        if (chan_done) {
                pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
                        IBSD(ppd->hw_pidx), chan_done);
        } else {
                for (chan = 0; chan < SERDES_CHANS; ++chan) {
                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
                                            (chan + (chan >> 1)),
                                            25, 0, 0);
                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
                                pr_info("Serdes %d chan %d calibration failed\n",
                                        IBSD(ppd->hw_pidx), chan);
                }
        }

        /* Turn off Calibration */
        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
        msleep(20);

        /* BRING RX UP */
        /* Set LE2 value (May be overridden in qsfp_7322_event) */
        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
        /* Set LE2 Loop bandwidth */
        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
        /* Enable LE2 */
        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
        msleep(20);
        /* Enable H0 only */
        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
        /* Enable VGA */
        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
        msleep(20);
        /* Set Frequency Loop Bandwidth */
        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
        /* Enable Frequency Loop */
        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
        /* Set Timing Loop Bandwidth */
        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
        /* Enable Timing Loop */
        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
        msleep(50);
        /*
         * Enable DFE:
         * Set receive adaptation mode.  SDR and DDR adaptation are
         * always on, and QDR is initially enabled; later disabled.
         */
        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                            ppd->dd->cspec->r1 ?
                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
        ppd->cpspec->qdr_dfe_on = 1;
        /* Disable LE1 */
        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
        /* Disable auto adapt for LE1 */
        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
        msleep(20);
        /* Enable AFE Offset Cancel */
        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
        /* Enable Baseline Wander Correction */
        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
        /* VGA output common mode */
        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

        /*
         * Initialize the Tx DDS tables.  Also done every QSFP event,
         * for adapters with QSFP
         */
        init_txdds_table(ppd, 0);

        return 0;
}

/* start adjust QMH serdes parameters */

static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
{
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                9, code << 9, 0x3f << 9);
}

static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
                            int enable, u32 tapenable)
{
        if (enable)
                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                        1, 3 << 10, 0x1f << 10);
        else
                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                        1, 0, 0x1f << 10);
}

/* Set clock to 1, 0, 1, 0 */
static void clock_man(struct qib_pportdata *ppd, int chan)
{
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                4, 0x4000, 0x4000);
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                4, 0, 0x4000);
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                4, 0x4000, 0x4000);
        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                4, 0, 0x4000);
}

/*
 * write the current Tx serdes pre, post, main, amp settings into the serdes.
 * The caller must pass the settings appropriate for the current speed,
 * or not care if they are correct for the current speed.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
                                  struct txdds_ent *txdds)
{
        u64 deemph;

        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
        /* field names for amp, main, post, pre, respectively */
        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                           tx_override_deemphasis_select);
        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                              txampcntl_d2a);
        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                         txc0_ena);
        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                          txcp1_ena);
        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                   txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
                                          txcn1_ena);
        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}

/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to init_txdds
 * but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
        struct txdds_ent *dds;

        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
                           ddr_dds : sdr_dds));
        write_tx_serdes_param(ppd, dds);
}

/* set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
        int chan;

        ppd->cpspec->qdr_reforce = 0;
        if (!ppd->dd->cspec->r1)
                return;

        for (chan = 0; chan < SERDES_CHANS; chan++) {
                set_man_mode_h1(ppd, chan, 1, 0);
                set_man_code(ppd, chan, ppd->cpspec->h1_val);
                clock_man(ppd, chan);
                set_man_mode_h1(ppd, chan, 0, 0);
        }
}

#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)

#define R_OPCODE_LSB 3
#define R_OP_NOP 0
#define R_OP_SHIFT 2
#define R_OP_UPDATE 3
#define R_TDI_LSB 2
#define R_TDO_LSB 1
#define R_RDY 1

static int qib_r_grab(struct qib_devdata *dd)
{
        u64 val = SJA_EN;

        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
}

/*
 * qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
        u64 val;
        int timeout;

        for (timeout = 0; timeout < 100 ; ++timeout) {
                val = qib_read_kreg32(dd, kr_r_access);
                if (val & R_RDY)
                        return (val >> R_TDO_LSB) & 1;
        }
        return -1;
}
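
/*
 * qib_r_shift() clocks 'len' bits through the selected BIST/JTAG-style
 * shift chain, LSB-first within each byte: inp supplies the TDI bit
 * stream (may be NULL), outp collects the TDO bits (may be NULL).
 * On success it returns the number of bits shifted.
 */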

static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}

	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}
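
/*
 * qib_r_shift() treats inp and outp as LSB-first bit streams: bit pos
 * of the stream lives at bit (pos & 7) of buf[pos >> 3], so e.g. bit
 * 11 is bit 3 of buf[1].  The TDO value recorded for bit pos is the
 * one sampled by the wait *before* that bit is written, so the chain's
 * previous contents are captured as the new contents shift in.
 */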

static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}
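
/*
 * As used below, a shift is always paired with an update, much like
 * JTAG's Shift-DR followed by Update-DR: qib_r_shift() clocks a new
 * image through the selected chain, then qib_r_update() latches it:
 *
 *	if (qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
 *	    qib_r_update(dd, BISTEN_AT) < 0)
 *		... report failure ...
 */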

#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
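
/*
 * BIT2BYTE() rounds a bit count up to whole bytes, sizing the chain
 * images below: BIT2BYTE(156) = 20 bytes for the AT chain, and
 * BIT2BYTE(625) = BIT2BYTE(632) = 79 bytes for the port-select and
 * ETM chains respectively.
 */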

/* these are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};

static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Do setup to properly handle IB link recovery; if @both is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single-port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}
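
/*
 * The chain of calls above programs the recovery logic in five
 * shift-then-update steps: clear the ETM, clear the AT, load the
 * port-select image, load the live AT image, then load the live ETM
 * image.  A failure anywhere short-circuits the rest of the sequence
 * and reports a single error.
 */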

static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * require a powercycle before we'll work again, and make
		 * sure we get no more interrupts, and don't turn off
		 * freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}