chip.c 434 KB

4309143101431114312143131431414315143161431714318143191432014321143221432314324143251432614327143281432914330143311433214333143341433514336143371433814339143401434114342143431434414345143461434714348143491435014351143521435314354143551435614357143581435914360143611436214363143641436514366143671436814369143701437114372143731437414375143761437714378143791438014381143821438314384143851438614387143881438914390143911439214393143941439514396143971439814399144001440114402144031440414405144061440714408144091441014411144121441314414144151441614417144181441914420144211442214423144241442514426144271442814429144301443114432144331443414435144361443714438144391444014441144421444314444144451444614447144481444914450144511445214453144541445514456144571445814459144601446114462144631446414465144661446714468144691447014471144721447314474144751447614477144781447914480144811448214483144841448514486144871448814489144901449114492144931449414495144961449714498144991450014501145021450314504145051450614507145081450914510145111451214513145141451514516145171451814519145201452114522145231452414525145261452714528145291453014531145321453314534145351453614537145381453914540145411454214543145441454514546145471454814549145501455114552145531455414555145561455714558145591456014561145621456314564145651456614567145681456914570145711457214573145741457514576145771457814579145801458114582145831458414585145861458714588145891459014591145921459314594145951459614597145981459914600146011460214603146041460514606146071460814609146101461114612146131461414615146161461714618146191462014621146221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816
  1. /*
  2. * Copyright(c) 2015, 2016 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. /*
  48. * This file contains all of the code that is specific to the HFI chip
  49. */
  50. #include <linux/pci.h>
  51. #include <linux/delay.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/module.h>
  54. #include "hfi.h"
  55. #include "trace.h"
  56. #include "mad.h"
  57. #include "pio.h"
  58. #include "sdma.h"
  59. #include "eprom.h"
  60. #include "efivar.h"
  61. #include "platform.h"
  62. #include "aspm.h"
  63. #include "affinity.h"
  64. #define NUM_IB_PORTS 1
  65. uint kdeth_qp;
  66. module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  67. MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  68. uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  69. module_param(num_vls, uint, S_IRUGO);
  70. MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  71. /*
  72. * Default time to aggregate two 10K packets from the idle state
  73. * (timer not running). The timer starts at the end of the first packet,
  74. * so only the time for one 10K packet and header plus a bit extra is needed.
  75. * 10 * 1024 + 64 header bytes = 10304 bytes
  76. * 10304 bytes / 12.5 GB/s = 824.32ns
  77. */
  78. uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  79. module_param(rcv_intr_timeout, uint, S_IRUGO);
  80. MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
  81. uint rcv_intr_count = 16; /* same as qib */
  82. module_param(rcv_intr_count, uint, S_IRUGO);
  83. MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  84. ushort link_crc_mask = SUPPORTED_CRCS;
  85. module_param(link_crc_mask, ushort, S_IRUGO);
  86. MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  87. uint loopback;
  88. module_param_named(loopback, loopback, uint, S_IRUGO);
  89. MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
  90. /* Other driver tunables */
  91. uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
  92. static ushort crc_14b_sideband = 1;
  93. static uint use_flr = 1;
  94. uint quick_linkup; /* skip LNI */
  95. struct flag_table {
  96. u64 flag; /* the flag */
  97. char *str; /* description string */
  98. u16 extra; /* extra information */
  99. u16 unused0;
  100. u32 unused1;
  101. };
  102. /* str must be a string constant */
  103. #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
  104. #define FLAG_ENTRY0(str, flag) {flag, str, 0}
  105. /* Send Error Consequences */
  106. #define SEC_WRITE_DROPPED 0x1
  107. #define SEC_PACKET_DROPPED 0x2
  108. #define SEC_SC_HALTED 0x4 /* per-context only */
  109. #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
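/*
 * Editor's illustration (not part of the original source): a flag_table
 * entry ties a status-register bit mask to a human-readable name and,
 * with FLAG_ENTRY, to consequence bits such as SEC_SPC_FREEZE.  The names
 * ExampleErr/EXAMPLE_ERR_SMASK below are placeholders, not real CSR fields.
 *
 *	static struct flag_table example_flags[] = {
 *		FLAG_ENTRY("ExampleErr", SEC_SPC_FREEZE, EXAMPLE_ERR_SMASK),
 *		FLAG_ENTRY0("OtherExampleErr", OTHER_EXAMPLE_ERR_SMASK),
 *	};
 */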
  110. #define DEFAULT_KRCVQS 2
  111. #define MIN_KERNEL_KCTXTS 2
  112. #define FIRST_KERNEL_KCTXT 1
  113. /* sizes for both the QP and RSM map tables */
  114. #define NUM_MAP_ENTRIES 256
  115. #define NUM_MAP_REGS 32
  116. /* Bit offset into the GUID which carries HFI id information */
  117. #define GUID_HFI_INDEX_SHIFT 39
  118. /* extract the emulation revision */
  119. #define emulator_rev(dd) ((dd)->irev >> 8)
  120. /* parallel and serial emulation versions are 3 and 4 respectively */
  121. #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
  122. #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
  123. /* RSM fields */
  124. /* packet type */
  125. #define IB_PACKET_TYPE 2ull
  126. #define QW_SHIFT 6ull
  127. /* QPN[7..1] */
  128. #define QPN_WIDTH 7ull
  129. /* LRH.BTH: QW 0, OFFSET 48 - for match */
  130. #define LRH_BTH_QW 0ull
  131. #define LRH_BTH_BIT_OFFSET 48ull
  132. #define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
  133. #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
  134. #define LRH_BTH_SELECT
  135. #define LRH_BTH_MASK 3ull
  136. #define LRH_BTH_VALUE 2ull
  137. /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
  138. #define LRH_SC_QW 0ull
  139. #define LRH_SC_BIT_OFFSET 56ull
  140. #define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
  141. #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
  142. #define LRH_SC_MASK 128ull
  143. #define LRH_SC_VALUE 0ull
  144. /* SC[n..0] QW 0, OFFSET 60 - for select */
  145. #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
  146. /* QPN[m+n:1] QW 1, OFFSET 1 */
  147. #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
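/*
 * Editor's note: each RSM offset above packs a quadword index into the bits
 * above QW_SHIFT and a bit offset within that quadword below it, so
 * LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48,
 * LRH_SC_MATCH_OFFSET = (0 << 6) | 56 = 56 and
 * QPN_SELECT_OFFSET = (1 << 6) | 1 = 65.
 */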
  148. /* defines to build power on SC2VL table */
  149. #define SC2VL_VAL( \
  150. num, \
  151. sc0, sc0val, \
  152. sc1, sc1val, \
  153. sc2, sc2val, \
  154. sc3, sc3val, \
  155. sc4, sc4val, \
  156. sc5, sc5val, \
  157. sc6, sc6val, \
  158. sc7, sc7val) \
  159. ( \
  160. ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
  161. ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
  162. ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
  163. ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
  164. ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
  165. ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
  166. ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
  167. ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
  168. )
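/*
 * Editor's illustration: an invocation such as
 *
 *	SC2VL_VAL(0,
 *		  0, 0,  1, 0,  2, 1,  3, 1,
 *		  4, 2,  5, 2,  6, 3,  7, 3)
 *
 * ORs each VL value into the SC-specific field of the SEND_SC2VLT0 register
 * (here SC0/1 -> VL0, SC2/3 -> VL1, SC4/5 -> VL2, SC6/7 -> VL3); the mapping
 * values shown are illustrative only.
 */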
  169. #define DC_SC_VL_VAL( \
  170. range, \
  171. e0, e0val, \
  172. e1, e1val, \
  173. e2, e2val, \
  174. e3, e3val, \
  175. e4, e4val, \
  176. e5, e5val, \
  177. e6, e6val, \
  178. e7, e7val, \
  179. e8, e8val, \
  180. e9, e9val, \
  181. e10, e10val, \
  182. e11, e11val, \
  183. e12, e12val, \
  184. e13, e13val, \
  185. e14, e14val, \
  186. e15, e15val) \
  187. ( \
  188. ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
  189. ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
  190. ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
  191. ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
  192. ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
  193. ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
  194. ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
  195. ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
  196. ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
  197. ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
  198. ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
  199. ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
  200. ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
  201. ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
  202. ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
  203. ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
  204. )
  205. /* all CceStatus sub-block freeze bits */
  206. #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
  207. | CCE_STATUS_RXE_FROZE_SMASK \
  208. | CCE_STATUS_TXE_FROZE_SMASK \
  209. | CCE_STATUS_TXE_PIO_FROZE_SMASK)
  210. /* all CceStatus sub-block TXE pause bits */
  211. #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
  212. | CCE_STATUS_TXE_PAUSED_SMASK \
  213. | CCE_STATUS_SDMA_PAUSED_SMASK)
  214. /* all CceStatus sub-block RXE pause bits */
  215. #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
  216. #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
  217. #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
  218. /*
  219. * CCE Error flags.
  220. */
  221. static struct flag_table cce_err_status_flags[] = {
  222. /* 0*/ FLAG_ENTRY0("CceCsrParityErr",
  223. CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
  224. /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
  225. CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
  226. /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
  227. CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
  228. /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
  229. CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
  230. /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
  231. CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
  232. /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
  233. CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
  234. /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
  235. CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
  236. /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
  237. CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
  238. /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
  239. CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
  240. /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
  241. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
  242. /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
  243. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
  244. /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
  245. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
  246. /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
  247. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
  248. /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
  249. CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
  250. /*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
  251. CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
  252. /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
  253. CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
  254. /*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
  255. CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
  256. /*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
  257. CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
  258. /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
  259. CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
  260. /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
  261. CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
  262. /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
  263. CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
  264. /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
  265. CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
  266. /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
  267. CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
  268. /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
  269. CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
  270. /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
  271. CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
  272. /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
  273. CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
  274. /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
  275. CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
  276. /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
  277. CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
  278. /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
  279. CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
  280. /*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
  281. CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
  282. /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
  283. CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
  284. /*31*/ FLAG_ENTRY0("LATriggered",
  285. CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
  286. /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
  287. CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
  288. /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
  289. CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
  290. /*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
  291. CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
  292. /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
  293. CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
  294. /*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
  295. CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
  296. /*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
  297. CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
  298. /*38*/ FLAG_ENTRY0("CceIntMapCorErr",
  299. CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
  300. /*39*/ FLAG_ENTRY0("CceIntMapUncErr",
  301. CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
  302. /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
  303. CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
  304. /*41-63 reserved*/
  305. };
  306. /*
  307. * Misc Error flags
  308. */
  309. #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
  310. static struct flag_table misc_err_status_flags[] = {
  311. /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
  312. /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
  313. /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
  314. /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
  315. /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
  316. /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
  317. /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
  318. /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
  319. /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
  320. /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
  321. /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
  322. /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
  323. /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
  324. };
  325. /*
  326. * TXE PIO Error flags and consequences
  327. */
  328. static struct flag_table pio_err_status_flags[] = {
  329. /* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
  330. SEC_WRITE_DROPPED,
  331. SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
  332. /* 1*/ FLAG_ENTRY("PioWriteAddrParity",
  333. SEC_SPC_FREEZE,
  334. SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
  335. /* 2*/ FLAG_ENTRY("PioCsrParity",
  336. SEC_SPC_FREEZE,
  337. SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
  338. /* 3*/ FLAG_ENTRY("PioSbMemFifo0",
  339. SEC_SPC_FREEZE,
  340. SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
  341. /* 4*/ FLAG_ENTRY("PioSbMemFifo1",
  342. SEC_SPC_FREEZE,
  343. SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
  344. /* 5*/ FLAG_ENTRY("PioPccFifoParity",
  345. SEC_SPC_FREEZE,
  346. SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
  347. /* 6*/ FLAG_ENTRY("PioPecFifoParity",
  348. SEC_SPC_FREEZE,
  349. SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
  350. /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
  351. SEC_SPC_FREEZE,
  352. SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
  353. /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
  354. SEC_SPC_FREEZE,
  355. SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
  356. /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
  357. SEC_SPC_FREEZE,
  358. SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
  359. /*10*/ FLAG_ENTRY("PioSmPktResetParity",
  360. SEC_SPC_FREEZE,
  361. SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
  362. /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
  363. SEC_SPC_FREEZE,
  364. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
  365. /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
  366. SEC_SPC_FREEZE,
  367. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
  368. /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
  369. 0,
  370. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
  371. /*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
  372. 0,
  373. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
  374. /*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
  375. SEC_SPC_FREEZE,
  376. SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
  377. /*16*/ FLAG_ENTRY("PioPpmcPblFifo",
  378. SEC_SPC_FREEZE,
  379. SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
  380. /*17*/ FLAG_ENTRY("PioInitSmIn",
  381. 0,
  382. SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
  383. /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
  384. SEC_SPC_FREEZE,
  385. SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
  386. /*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
  387. SEC_SPC_FREEZE,
  388. SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
  389. /*20*/ FLAG_ENTRY("PioHostAddrMemCor",
  390. 0,
  391. SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
  392. /*21*/ FLAG_ENTRY("PioWriteDataParity",
  393. SEC_SPC_FREEZE,
  394. SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
  395. /*22*/ FLAG_ENTRY("PioStateMachine",
  396. SEC_SPC_FREEZE,
  397. SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
  398. /*23*/ FLAG_ENTRY("PioWriteQwValidParity",
  399. SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
  400. SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
  401. /*24*/ FLAG_ENTRY("PioBlockQwCountParity",
  402. SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
  403. SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
  404. /*25*/ FLAG_ENTRY("PioVlfVlLenParity",
  405. SEC_SPC_FREEZE,
  406. SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
  407. /*26*/ FLAG_ENTRY("PioVlfSopParity",
  408. SEC_SPC_FREEZE,
  409. SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
  410. /*27*/ FLAG_ENTRY("PioVlFifoParity",
  411. SEC_SPC_FREEZE,
  412. SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
  413. /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
  414. SEC_SPC_FREEZE,
  415. SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
  416. /*29*/ FLAG_ENTRY("PioPpmcSopLen",
  417. SEC_SPC_FREEZE,
  418. SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
  419. /*30-31 reserved*/
  420. /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
  421. SEC_SPC_FREEZE,
  422. SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
  423. /*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
  424. SEC_SPC_FREEZE,
  425. SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
  426. /*34*/ FLAG_ENTRY("PioPccSopHeadParity",
  427. SEC_SPC_FREEZE,
  428. SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
  429. /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
  430. SEC_SPC_FREEZE,
  431. SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
  432. /*36-63 reserved*/
  433. };
  434. /* TXE PIO errors that cause an SPC freeze */
  435. #define ALL_PIO_FREEZE_ERR \
  436. (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
  437. | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
  438. | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
  439. | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
  440. | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
  441. | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
  442. | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
  443. | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
  444. | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
  445. | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
  446. | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
  447. | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
  448. | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
  449. | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
  450. | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
  451. | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
  452. | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
  453. | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
  454. | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
  455. | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
  456. | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
  457. | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
  458. | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
  459. | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
  460. | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
  461. | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
  462. | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
  463. | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
  464. | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
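/*
 * Editor's note: ALL_PIO_FREEZE_ERR collects exactly those
 * pio_err_status_flags entries above whose consequence field includes
 * SEC_SPC_FREEZE.
 */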
  465. /*
  466. * TXE SDMA Error flags
  467. */
  468. static struct flag_table sdma_err_status_flags[] = {
  469. /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
  470. SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
  471. /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
  472. SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
  473. /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
  474. SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
  475. /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
  476. SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
  477. /*04-63 reserved*/
  478. };
  479. /* TXE SDMA errors that cause an SPC freeze */
  480. #define ALL_SDMA_FREEZE_ERR \
  481. (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
  482. | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
  483. | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
  484. /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
  485. #define PORT_DISCARD_EGRESS_ERRS \
  486. (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
  487. | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
  488. | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
  489. /*
  490. * TXE Egress Error flags
  491. */
  492. #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
  493. static struct flag_table egress_err_status_flags[] = {
  494. /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
  495. /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
  496. /* 2 reserved */
  497. /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
  498. SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
  499. /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
  500. /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
  501. /* 6 reserved */
  502. /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
  503. SEES(TX_PIO_LAUNCH_INTF_PARITY)),
  504. /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
  505. SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
  506. /* 9-10 reserved */
  507. /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
  508. SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
  509. /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
  510. /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
  511. /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
  512. /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
  513. /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
  514. SEES(TX_SDMA0_DISALLOWED_PACKET)),
  515. /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
  516. SEES(TX_SDMA1_DISALLOWED_PACKET)),
  517. /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
  518. SEES(TX_SDMA2_DISALLOWED_PACKET)),
  519. /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
  520. SEES(TX_SDMA3_DISALLOWED_PACKET)),
  521. /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
  522. SEES(TX_SDMA4_DISALLOWED_PACKET)),
  523. /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
  524. SEES(TX_SDMA5_DISALLOWED_PACKET)),
  525. /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
  526. SEES(TX_SDMA6_DISALLOWED_PACKET)),
  527. /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
  528. SEES(TX_SDMA7_DISALLOWED_PACKET)),
  529. /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
  530. SEES(TX_SDMA8_DISALLOWED_PACKET)),
  531. /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
  532. SEES(TX_SDMA9_DISALLOWED_PACKET)),
  533. /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
  534. SEES(TX_SDMA10_DISALLOWED_PACKET)),
  535. /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
  536. SEES(TX_SDMA11_DISALLOWED_PACKET)),
  537. /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
  538. SEES(TX_SDMA12_DISALLOWED_PACKET)),
  539. /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
  540. SEES(TX_SDMA13_DISALLOWED_PACKET)),
  541. /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
  542. SEES(TX_SDMA14_DISALLOWED_PACKET)),
  543. /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
  544. SEES(TX_SDMA15_DISALLOWED_PACKET)),
  545. /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
  546. SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
  547. /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
  548. SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
  549. /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
  550. SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
  551. /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
  552. SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
  553. /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
  554. SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
  555. /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
  556. SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
  557. /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
  558. SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
  559. /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
  560. SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
  561. /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
  562. SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
  563. /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
  564. /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
  565. /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
  566. /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
  567. /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
  568. /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
  569. /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
  570. /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
  571. /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
  572. /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
  573. /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
  574. /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
  575. /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
  576. /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
  577. /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
  578. /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
  579. /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
  580. /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
  581. /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
  582. /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
  583. /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
  584. /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
  585. SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
  586. /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
  587. SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
  588. };
  589. /*
  590. * TXE Egress Error Info flags
  591. */
  592. #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
  593. static struct flag_table egress_err_info_flags[] = {
  594. /* 0*/ FLAG_ENTRY0("Reserved", 0ull),
  595. /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
  596. /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
  597. /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
  598. /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
  599. /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
  600. /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
  601. /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
  602. /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
  603. /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
  604. /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
  605. /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
  606. /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
  607. /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
  608. /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
  609. /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
  610. /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
  611. /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
  612. /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
  613. /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
  614. /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
  615. /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
  616. };
  617. /* TXE Egress errors that cause an SPC freeze */
  618. #define ALL_TXE_EGRESS_FREEZE_ERR \
  619. (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
  620. | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
  621. | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
  622. | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
  623. | SEES(TX_LAUNCH_CSR_PARITY) \
  624. | SEES(TX_SBRD_CTL_CSR_PARITY) \
  625. | SEES(TX_CONFIG_PARITY) \
  626. | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
  627. | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
  628. | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
  629. | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
  630. | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
  631. | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
  632. | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
  633. | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
  634. | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
  635. | SEES(TX_CREDIT_RETURN_PARITY))
  636. /*
  637. * TXE Send error flags
  638. */
  639. #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
  640. static struct flag_table send_err_status_flags[] = {
  641. /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
  642. /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
  643. /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
  644. };
  645. /*
  646. * TXE Send Context Error flags and consequences
  647. */
  648. static struct flag_table sc_err_status_flags[] = {
  649. /* 0*/ FLAG_ENTRY("InconsistentSop",
  650. SEC_PACKET_DROPPED | SEC_SC_HALTED,
  651. SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
  652. /* 1*/ FLAG_ENTRY("DisallowedPacket",
  653. SEC_PACKET_DROPPED | SEC_SC_HALTED,
  654. SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
  655. /* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
  656. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  657. SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
  658. /* 3*/ FLAG_ENTRY("WriteOverflow",
  659. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  660. SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
  661. /* 4*/ FLAG_ENTRY("WriteOutOfBounds",
  662. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  663. SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
  664. /* 5-63 reserved*/
  665. };
  666. /*
  667. * RXE Receive Error flags
  668. */
  669. #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
  670. static struct flag_table rxe_err_status_flags[] = {
  671. /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
  672. /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
  673. /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
  674. /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
  675. /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
  676. /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
  677. /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
  678. /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
  679. /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
  680. /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
  681. /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
  682. /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
  683. /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
  684. /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
  685. /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
  686. /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
  687. /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
  688. RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
  689. /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
  690. /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
  691. /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
  692. RXES(RBUF_BLOCK_LIST_READ_UNC)),
  693. /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
  694. RXES(RBUF_BLOCK_LIST_READ_COR)),
  695. /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
  696. RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
  697. /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
  698. RXES(RBUF_CSR_QENT_CNT_PARITY)),
  699. /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
  700. RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
  701. /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
  702. RXES(RBUF_CSR_QVLD_BIT_PARITY)),
  703. /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
  704. /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
  705. /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
  706. RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
  707. /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
  708. /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
  709. /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
  710. /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
  711. /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
  712. /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
  713. /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
  714. /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
  715. RXES(RBUF_FL_INITDONE_PARITY)),
  716. /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
  717. RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
  718. /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
  719. /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
  720. /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
  721. /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
  722. RXES(LOOKUP_DES_PART1_UNC_COR)),
  723. /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
  724. RXES(LOOKUP_DES_PART2_PARITY)),
  725. /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
  726. /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
  727. /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
  728. /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
  729. /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
  730. /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
  731. /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
  732. /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
  733. /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
  734. /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
  735. /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
  736. /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
  737. /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
  738. /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
  739. /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
  740. /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
  741. /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
  742. /*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
  743. /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
  744. /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
  745. /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
  746. /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
  747. };
  748. /* RXE errors that will trigger an SPC freeze */
  749. #define ALL_RXE_FREEZE_ERR \
  750. (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
  751. | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
  752. | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
  753. | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
  754. | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
  755. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
  756. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
  757. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
  758. | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
  759. | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
  760. | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
  761. | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
  762. | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
  763. | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
  764. | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
  765. | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
  766. | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
  767. | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
  768. | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
  769. | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
  770. | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
  771. | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
  772. | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
  773. | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
  774. | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
  775. | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
  776. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
  777. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
  778. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
  779. | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
  780. | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
  781. | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
  782. | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
  783. | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
  784. | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
  785. | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
  786. | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
  787. | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
  788. | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
  789. | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
  790. | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
  791. | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
  792. | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
  793. | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
  794. #define RXE_FREEZE_ABORT_MASK \
  795. (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
  796. RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
  797. RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
  798. /*
  799. * DCC Error Flags
  800. */
  801. #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
  802. static struct flag_table dcc_err_flags[] = {
  803. FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
  804. FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
  805. FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
  806. FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
  807. FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
  808. FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
  809. FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
  810. FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
  811. FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
  812. FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
  813. FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
  814. FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
  815. FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
  816. FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
  817. FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
  818. FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
  819. FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
  820. FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
  821. FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
  822. FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
  823. FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
  824. FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
  825. FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
  826. FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
  827. FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
  828. FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
  829. FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
  830. FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
  831. FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
  832. FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
  833. FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
  834. FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
  835. FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
  836. FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
  837. FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
  838. FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
  839. FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
  840. FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
  841. FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
  842. FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
  843. FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
  844. FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
  845. FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
  846. FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
  847. FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
  848. FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
  849. };
  850. /*
  851. * LCB error flags
  852. */
  853. #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
  854. static struct flag_table lcb_err_flags[] = {
  855. /* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
  856. /* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
  857. /* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
  858. /* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
  859. LCBE(ALL_LNS_FAILED_REINIT_TEST)),
  860. /* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
  861. /* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
  862. /* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
  863. /* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
  864. /* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
  865. /* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
  866. /*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
  867. /*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
  868. /*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
  869. /*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
  870. LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
  871. /*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
  872. /*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
  873. /*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
  874. /*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
  875. /*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
  876. /*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
  877. LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
  878. /*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
  879. /*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
  880. /*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
  881. /*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
  882. /*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
  883. /*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
  884. /*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
  885. LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
  886. /*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
  887. /*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
  888. LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
  889. /*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
  890. LCBE(REDUNDANT_FLIT_PARITY_ERR))
  891. };
  892. /*
  893. * DC8051 Error Flags
  894. */
  895. #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
  896. static struct flag_table dc8051_err_flags[] = {
  897. FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
  898. FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
  899. FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
  900. FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
  901. FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
  902. FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
  903. FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
  904. FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
  905. FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
  906. D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
  907. FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
  908. };
  909. /*
  910. * DC8051 Information Error flags
  911. *
  912. * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
  913. */
  914. static struct flag_table dc8051_info_err_flags[] = {
  915. FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
  916. FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
  917. FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
  918. FLAG_ENTRY0("Serdes internal loopback failure",
  919. FAILED_SERDES_INTERNAL_LOOPBACK),
  920. FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
  921. FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
  922. FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
  923. FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
  924. FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
  925. FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
  926. FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
  927. FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
  928. FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
  929. };
  930. /*
  931. * DC8051 Information Host Information flags
  932. *
  933. * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
  934. */
  935. static struct flag_table dc8051_info_host_msg_flags[] = {
  936. FLAG_ENTRY0("Host request done", 0x0001),
  937. FLAG_ENTRY0("BC SMA message", 0x0002),
  938. FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
  939. FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
  940. FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
  941. FLAG_ENTRY0("External device config request", 0x0020),
  942. FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
  943. FLAG_ENTRY0("LinkUp achieved", 0x0080),
  944. FLAG_ENTRY0("Link going down", 0x0100),
  945. };
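/*
 * Editor's sketch (not part of the original source): the flag tables above
 * are typically consumed by walking every entry, testing its mask against a
 * status value and appending the names of the bits that are set.  The helper
 * below is illustrative only; the driver's own formatting helpers may differ
 * in detail.
 */
static void example_flag_string(char *buf, size_t len, u64 status,
				const struct flag_table *table, int nentries)
{
	int i;

	buf[0] = '\0';
	for (i = 0; i < nentries; i++) {
		if (status & table[i].flag) {
			/* append "<name>," for every matching bit */
			strlcat(buf, table[i].str, len);
			strlcat(buf, ",", len);
		}
	}
}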
  946. static u32 encoded_size(u32 size);
  947. static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
  948. static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
  949. static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
  950. u8 *continuous);
  951. static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
  952. u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
  953. static void read_vc_remote_link_width(struct hfi1_devdata *dd,
  954. u8 *remote_tx_rate, u16 *link_widths);
  955. static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
  956. u8 *flag_bits, u16 *link_widths);
  957. static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
  958. u8 *device_rev);
  959. static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
  960. static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
  961. static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
  962. u8 *tx_polarity_inversion,
  963. u8 *rx_polarity_inversion, u8 *max_rate);
  964. static void handle_sdma_eng_err(struct hfi1_devdata *dd,
  965. unsigned int context, u64 err_status);
  966. static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
  967. static void handle_dcc_err(struct hfi1_devdata *dd,
  968. unsigned int context, u64 err_status);
  969. static void handle_lcb_err(struct hfi1_devdata *dd,
  970. unsigned int context, u64 err_status);
  971. static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
  972. static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  973. static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  974. static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  975. static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  976. static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  977. static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  978. static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  979. static void set_partition_keys(struct hfi1_pportdata *);
  980. static const char *link_state_name(u32 state);
  981. static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
  982. u32 state);
  983. static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
  984. u64 *out_data);
  985. static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
  986. static int thermal_init(struct hfi1_devdata *dd);
  987. static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
  988. int msecs);
  989. static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
  990. static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
  991. static void handle_temp_err(struct hfi1_devdata *);
  992. static void dc_shutdown(struct hfi1_devdata *);
  993. static void dc_start(struct hfi1_devdata *);
  994. static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
  995. unsigned int *np);
  996. static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
  997. /*
  998. * Error interrupt table entry. This is used as input to the interrupt
  999. * "clear down" routine used for all second tier error interrupt register.
  1000. * Second tier interrupt registers have a single bit representing them
  1001. * in the top-level CceIntStatus.
  1002. */
  1003. struct err_reg_info {
  1004. u32 status; /* status CSR offset */
  1005. u32 clear; /* clear CSR offset */
  1006. u32 mask; /* mask CSR offset */
  1007. void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
  1008. const char *desc;
  1009. };
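/*
 * Illustrative sketch only (an assumption, not the driver's actual
 * routine): a "clear down" pass over one of these entries reads the
 * status CSR, acknowledges the bits by writing them back to the clear
 * CSR, and then hands the value to the entry's handler.  read_csr()
 * and write_csr() are the plain CSR accessors defined later in this
 * file.
 */
#if 0
static void example_clear_down(struct hfi1_devdata *dd, u32 source,
			       const struct err_reg_info *eri)
{
	u64 reg = read_csr(dd, eri->status);

	if (!reg)
		return;
	write_csr(dd, eri->clear, reg);	/* ack the bits we saw */
	if (eri->handler)
		eri->handler(dd, source, reg);
}
#endif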
  1010. #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
  1011. #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
  1012. #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
  1013. /*
  1014. * Helpers for building HFI and DC error interrupt table entries. Different
  1015. * helpers are needed because of inconsistent register names.
  1016. */
  1017. #define EE(reg, handler, desc) \
  1018. { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
  1019. handler, desc }
  1020. #define DC_EE1(reg, handler, desc) \
  1021. { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
  1022. #define DC_EE2(reg, handler, desc) \
  1023. { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
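/*
 * For reference, the helpers expand as follows (straight from the macro
 * text above), which shows why the differing register suffixes in the
 * HFI and DC CSR sets need separate helpers:
 *
 *   EE(CCE_ERR, handle_cce_err, "CceErr")
 *     => { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *          handle_cce_err, "CceErr" }
 *
 *   DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err")
 *     => { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
 *          handle_dcc_err, "DCC Err" }
 *
 *   DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err")
 *     => { DC_LCB_ERR_FLG, DC_LCB_ERR_CLR, DC_LCB_ERR_EN,
 *          handle_lcb_err, "LCB Err" }
 */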
  1024. /*
  1025. * Table of the "misc" grouping of error interrupts. Each entry refers to
  1026. * another register containing more information.
  1027. */
  1028. static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
  1029. /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
  1030. /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
  1031. /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
  1032. /* 3*/ { 0, 0, 0, NULL }, /* reserved */
  1033. /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
  1034. /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
  1035. /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
  1036. /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
  1037. /* the rest are reserved */
  1038. };
  1039. /*
  1040. * Index into the Various section of the interrupt sources
  1041. * corresponding to the Critical Temperature interrupt.
  1042. */
  1043. #define TCRIT_INT_SOURCE 4
  1044. /*
  1045. * SDMA error interrupt entry - refers to another register containing more
  1046. * information.
  1047. */
  1048. static const struct err_reg_info sdma_eng_err =
  1049. EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
  1050. static const struct err_reg_info various_err[NUM_VARIOUS] = {
  1051. /* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
  1052. /* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
  1053. /* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
  1054. /* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
  1055. /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
  1056. /* rest are reserved */
  1057. };
  1058. /*
  1059. * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
  1060. * register cannot be derived from the MTU value because 10K is not
  1061. * a power of 2. Therefore, we need a constant. Everything else can
  1062. * be calculated.
  1063. */
  1064. #define DCC_CFG_PORT_MTU_CAP_10240 7
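/*
 * Quick arithmetic check of the note above: 10240 = 10 * 1024 sits
 * between 2^13 (8192) and 2^14 (16384), so it has no exact log2 and
 * gets the fixed encoding 7 instead of a computed one.
 */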
  1065. /*
  1066. * Table of the DC grouping of error interrupts. Each entry refers to
  1067. * another register containing more information.
  1068. */
  1069. static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
  1070. /* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
  1071. /* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
  1072. /* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
  1073. /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
  1074. /* the rest are reserved */
  1075. };
  1076. struct cntr_entry {
  1077. /*
  1078. * counter name
  1079. */
  1080. char *name;
  1081. /*
  1082. * csr to read for name (if applicable)
  1083. */
  1084. u64 csr;
  1085. /*
  1086. * offset into dd or ppd to store the counter's value
  1087. */
  1088. int offset;
  1089. /*
  1090. * flags
  1091. */
  1092. u8 flags;
  1093. /*
  1094. * accessor for stat element, context either dd or ppd
  1095. */
  1096. u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
  1097. int mode, u64 data);
  1098. };
  1099. #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
  1100. #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
  1101. #define CNTR_ELEM(name, csr, offset, flags, accessor) \
  1102. { \
  1103. name, \
  1104. csr, \
  1105. offset, \
  1106. flags, \
  1107. accessor \
  1108. }
  1109. /* 32bit RXE */
  1110. #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
  1111. CNTR_ELEM(#name, \
  1112. (counter * 8 + RCV_COUNTER_ARRAY32), \
  1113. 0, flags | CNTR_32BIT, \
  1114. port_access_u32_csr)
  1115. #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
  1116. CNTR_ELEM(#name, \
  1117. (counter * 8 + RCV_COUNTER_ARRAY32), \
  1118. 0, flags | CNTR_32BIT, \
  1119. dev_access_u32_csr)
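/*
 * Expansion example (hypothetical counter name and index, expanded per
 * the macros above):
 *
 *   RXE32_DEV_CNTR_ELEM(RcvFoo, 3, CNTR_NORMAL)
 *     => { "RcvFoo", (3 * 8 + RCV_COUNTER_ARRAY32), 0,
 *          CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. each entry in a counter array occupies an 8-byte CSR slot offset
 * from the array base.
 */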
  1120. /* 64bit RXE */
  1121. #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
  1122. CNTR_ELEM(#name, \
  1123. (counter * 8 + RCV_COUNTER_ARRAY64), \
  1124. 0, flags, \
  1125. port_access_u64_csr)
  1126. #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
  1127. CNTR_ELEM(#name, \
  1128. (counter * 8 + RCV_COUNTER_ARRAY64), \
  1129. 0, flags, \
  1130. dev_access_u64_csr)
  1131. #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
  1132. #define OVR_ELM(ctx) \
  1133. CNTR_ELEM("RcvHdrOvr" #ctx, \
  1134. (RCV_HDR_OVFL_CNT + ctx * 0x100), \
  1135. 0, CNTR_NORMAL, port_access_u64_csr)
  1136. /* 32bit TXE */
  1137. #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
  1138. CNTR_ELEM(#name, \
  1139. (counter * 8 + SEND_COUNTER_ARRAY32), \
  1140. 0, flags | CNTR_32BIT, \
  1141. port_access_u32_csr)
  1142. /* 64bit TXE */
  1143. #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
  1144. CNTR_ELEM(#name, \
  1145. (counter * 8 + SEND_COUNTER_ARRAY64), \
  1146. 0, flags, \
  1147. port_access_u64_csr)
  1148. #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
  1149. CNTR_ELEM(#name, \
  1150. (counter * 8 + SEND_COUNTER_ARRAY64), \
  1151. 0, \
  1152. flags, \
  1153. dev_access_u64_csr)
  1154. /* CCE */
  1155. #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
  1156. CNTR_ELEM(#name, \
  1157. (counter * 8 + CCE_COUNTER_ARRAY32), \
  1158. 0, flags | CNTR_32BIT, \
  1159. dev_access_u32_csr)
  1160. #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
  1161. CNTR_ELEM(#name, \
  1162. (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
  1163. 0, flags | CNTR_32BIT, \
  1164. dev_access_u32_csr)
  1165. /* DC */
  1166. #define DC_PERF_CNTR(name, counter, flags) \
  1167. CNTR_ELEM(#name, \
  1168. counter, \
  1169. 0, \
  1170. flags, \
  1171. dev_access_u64_csr)
  1172. #define DC_PERF_CNTR_LCB(name, counter, flags) \
  1173. CNTR_ELEM(#name, \
  1174. counter, \
  1175. 0, \
  1176. flags, \
  1177. dc_access_lcb_cntr)
  1178. /* ibp counters */
  1179. #define SW_IBP_CNTR(name, cntr) \
  1180. CNTR_ELEM(#name, \
  1181. 0, \
  1182. 0, \
  1183. CNTR_SYNTH, \
  1184. access_ibp_##cntr)
  1185. u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
  1186. {
  1187. if (dd->flags & HFI1_PRESENT) {
  1188. return readq((void __iomem *)dd->kregbase + offset);
  1189. }
  1190. return -1;
  1191. }
  1192. void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
  1193. {
  1194. if (dd->flags & HFI1_PRESENT)
  1195. writeq(value, (void __iomem *)dd->kregbase + offset);
  1196. }
  1197. void __iomem *get_csr_addr(
  1198. struct hfi1_devdata *dd,
  1199. u32 offset)
  1200. {
  1201. return (void __iomem *)dd->kregbase + offset;
  1202. }
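/*
 * Minimal usage sketch (illustration only) of the accessors above: the
 * usual read-modify-write pattern for setting bits in a CSR.  The
 * helper name is hypothetical, so the sketch is kept under #if 0.
 */
#if 0
static void example_set_csr_bits(struct hfi1_devdata *dd, u32 offset,
				 u64 bits)
{
	u64 reg = read_csr(dd, offset);

	write_csr(dd, offset, reg | bits);
}
#endif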
  1203. static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
  1204. int mode, u64 value)
  1205. {
  1206. u64 ret;
  1207. if (mode == CNTR_MODE_R) {
  1208. ret = read_csr(dd, csr);
  1209. } else if (mode == CNTR_MODE_W) {
  1210. write_csr(dd, csr, value);
  1211. ret = value;
  1212. } else {
  1213. dd_dev_err(dd, "Invalid cntr register access mode");
  1214. return 0;
  1215. }
  1216. hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
  1217. return ret;
  1218. }
  1219. /* Dev Access */
  1220. static u64 dev_access_u32_csr(const struct cntr_entry *entry,
  1221. void *context, int vl, int mode, u64 data)
  1222. {
  1223. struct hfi1_devdata *dd = context;
  1224. u64 csr = entry->csr;
  1225. if (entry->flags & CNTR_SDMA) {
  1226. if (vl == CNTR_INVALID_VL)
  1227. return 0;
  1228. csr += 0x100 * vl;
  1229. } else {
  1230. if (vl != CNTR_INVALID_VL)
  1231. return 0;
  1232. }
  1233. return read_write_csr(dd, csr, mode, data);
  1234. }
  1235. static u64 access_sde_err_cnt(const struct cntr_entry *entry,
  1236. void *context, int idx, int mode, u64 data)
  1237. {
  1238. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1239. if (dd->per_sdma && idx < dd->num_sdma)
  1240. return dd->per_sdma[idx].err_cnt;
  1241. return 0;
  1242. }
  1243. static u64 access_sde_int_cnt(const struct cntr_entry *entry,
  1244. void *context, int idx, int mode, u64 data)
  1245. {
  1246. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1247. if (dd->per_sdma && idx < dd->num_sdma)
  1248. return dd->per_sdma[idx].sdma_int_cnt;
  1249. return 0;
  1250. }
  1251. static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
  1252. void *context, int idx, int mode, u64 data)
  1253. {
  1254. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1255. if (dd->per_sdma && idx < dd->num_sdma)
  1256. return dd->per_sdma[idx].idle_int_cnt;
  1257. return 0;
  1258. }
  1259. static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
  1260. void *context, int idx, int mode,
  1261. u64 data)
  1262. {
  1263. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1264. if (dd->per_sdma && idx < dd->num_sdma)
  1265. return dd->per_sdma[idx].progress_int_cnt;
  1266. return 0;
  1267. }
  1268. static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
  1269. int vl, int mode, u64 data)
  1270. {
  1271. struct hfi1_devdata *dd = context;
  1272. u64 val = 0;
  1273. u64 csr = entry->csr;
  1274. if (entry->flags & CNTR_VL) {
  1275. if (vl == CNTR_INVALID_VL)
  1276. return 0;
  1277. csr += 8 * vl;
  1278. } else {
  1279. if (vl != CNTR_INVALID_VL)
  1280. return 0;
  1281. }
  1282. val = read_write_csr(dd, csr, mode, data);
  1283. return val;
  1284. }
  1285. static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
  1286. int vl, int mode, u64 data)
  1287. {
  1288. struct hfi1_devdata *dd = context;
  1289. u32 csr = entry->csr;
  1290. int ret = 0;
  1291. if (vl != CNTR_INVALID_VL)
  1292. return 0;
  1293. if (mode == CNTR_MODE_R)
  1294. ret = read_lcb_csr(dd, csr, &data);
  1295. else if (mode == CNTR_MODE_W)
  1296. ret = write_lcb_csr(dd, csr, data);
  1297. if (ret) {
  1298. dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
  1299. return 0;
  1300. }
  1301. hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
  1302. return data;
  1303. }
  1304. /* Port Access */
  1305. static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
  1306. int vl, int mode, u64 data)
  1307. {
  1308. struct hfi1_pportdata *ppd = context;
  1309. if (vl != CNTR_INVALID_VL)
  1310. return 0;
  1311. return read_write_csr(ppd->dd, entry->csr, mode, data);
  1312. }
  1313. static u64 port_access_u64_csr(const struct cntr_entry *entry,
  1314. void *context, int vl, int mode, u64 data)
  1315. {
  1316. struct hfi1_pportdata *ppd = context;
  1317. u64 val;
  1318. u64 csr = entry->csr;
  1319. if (entry->flags & CNTR_VL) {
  1320. if (vl == CNTR_INVALID_VL)
  1321. return 0;
  1322. csr += 8 * vl;
  1323. } else {
  1324. if (vl != CNTR_INVALID_VL)
  1325. return 0;
  1326. }
  1327. val = read_write_csr(ppd->dd, csr, mode, data);
  1328. return val;
  1329. }
  1330. /* Software defined */
  1331. static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
  1332. u64 data)
  1333. {
  1334. u64 ret;
  1335. if (mode == CNTR_MODE_R) {
  1336. ret = *cntr;
  1337. } else if (mode == CNTR_MODE_W) {
  1338. *cntr = data;
  1339. ret = data;
  1340. } else {
  1341. dd_dev_err(dd, "Invalid cntr sw access mode");
  1342. return 0;
  1343. }
  1344. hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
  1345. return ret;
  1346. }
  1347. static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
  1348. int vl, int mode, u64 data)
  1349. {
  1350. struct hfi1_pportdata *ppd = context;
  1351. if (vl != CNTR_INVALID_VL)
  1352. return 0;
  1353. return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
  1354. }
  1355. static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
  1356. int vl, int mode, u64 data)
  1357. {
  1358. struct hfi1_pportdata *ppd = context;
  1359. if (vl != CNTR_INVALID_VL)
  1360. return 0;
  1361. return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
  1362. }
  1363. static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
  1364. void *context, int vl, int mode,
  1365. u64 data)
  1366. {
  1367. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
  1368. if (vl != CNTR_INVALID_VL)
  1369. return 0;
  1370. return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
  1371. }
  1372. static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
  1373. void *context, int vl, int mode, u64 data)
  1374. {
  1375. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
  1376. u64 zero = 0;
  1377. u64 *counter;
  1378. if (vl == CNTR_INVALID_VL)
  1379. counter = &ppd->port_xmit_discards;
  1380. else if (vl >= 0 && vl < C_VL_COUNT)
  1381. counter = &ppd->port_xmit_discards_vl[vl];
  1382. else
  1383. counter = &zero;
  1384. return read_write_sw(ppd->dd, counter, mode, data);
  1385. }
  1386. static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
  1387. void *context, int vl, int mode,
  1388. u64 data)
  1389. {
  1390. struct hfi1_pportdata *ppd = context;
  1391. if (vl != CNTR_INVALID_VL)
  1392. return 0;
  1393. return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
  1394. mode, data);
  1395. }
  1396. static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
  1397. void *context, int vl, int mode, u64 data)
  1398. {
  1399. struct hfi1_pportdata *ppd = context;
  1400. if (vl != CNTR_INVALID_VL)
  1401. return 0;
  1402. return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
  1403. mode, data);
  1404. }
  1405. u64 get_all_cpu_total(u64 __percpu *cntr)
  1406. {
  1407. int cpu;
  1408. u64 counter = 0;
  1409. for_each_possible_cpu(cpu)
  1410. counter += *per_cpu_ptr(cntr, cpu);
  1411. return counter;
  1412. }
  1413. static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
  1414. u64 __percpu *cntr,
  1415. int vl, int mode, u64 data)
  1416. {
  1417. u64 ret = 0;
  1418. if (vl != CNTR_INVALID_VL)
  1419. return 0;
  1420. if (mode == CNTR_MODE_R) {
  1421. ret = get_all_cpu_total(cntr) - *z_val;
  1422. } else if (mode == CNTR_MODE_W) {
  1423. /* A write can only zero the counter */
  1424. if (data == 0)
  1425. *z_val = get_all_cpu_total(cntr);
  1426. else
  1427. dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
  1428. } else {
  1429. dd_dev_err(dd, "Invalid cntr sw cpu access mode");
  1430. return 0;
  1431. }
  1432. return ret;
  1433. }
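/*
 * Note on the idiom above: the per-CPU counters themselves are never
 * reset.  "Zeroing" only snapshots the current total into *z_val, and
 * subsequent reads report the delta since that snapshot.
 */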
  1434. static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
  1435. void *context, int vl, int mode, u64 data)
  1436. {
  1437. struct hfi1_devdata *dd = context;
  1438. return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
  1439. mode, data);
  1440. }
  1441. static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
  1442. void *context, int vl, int mode, u64 data)
  1443. {
  1444. struct hfi1_devdata *dd = context;
  1445. return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
  1446. mode, data);
  1447. }
  1448. static u64 access_sw_pio_wait(const struct cntr_entry *entry,
  1449. void *context, int vl, int mode, u64 data)
  1450. {
  1451. struct hfi1_devdata *dd = context;
  1452. return dd->verbs_dev.n_piowait;
  1453. }
  1454. static u64 access_sw_pio_drain(const struct cntr_entry *entry,
  1455. void *context, int vl, int mode, u64 data)
  1456. {
  1457. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1458. return dd->verbs_dev.n_piodrain;
  1459. }
  1460. static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
  1461. void *context, int vl, int mode, u64 data)
  1462. {
  1463. struct hfi1_devdata *dd = context;
  1464. return dd->verbs_dev.n_txwait;
  1465. }
  1466. static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
  1467. void *context, int vl, int mode, u64 data)
  1468. {
  1469. struct hfi1_devdata *dd = context;
  1470. return dd->verbs_dev.n_kmem_wait;
  1471. }
  1472. static u64 access_sw_send_schedule(const struct cntr_entry *entry,
  1473. void *context, int vl, int mode, u64 data)
  1474. {
  1475. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1476. return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
  1477. mode, data);
  1478. }
  1479. /* Software counters for the error status bits within MISC_ERR_STATUS */
  1480. static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
  1481. void *context, int vl, int mode,
  1482. u64 data)
  1483. {
  1484. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1485. return dd->misc_err_status_cnt[12];
  1486. }
  1487. static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
  1488. void *context, int vl, int mode,
  1489. u64 data)
  1490. {
  1491. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1492. return dd->misc_err_status_cnt[11];
  1493. }
  1494. static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
  1495. void *context, int vl, int mode,
  1496. u64 data)
  1497. {
  1498. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1499. return dd->misc_err_status_cnt[10];
  1500. }
  1501. static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
  1502. void *context, int vl,
  1503. int mode, u64 data)
  1504. {
  1505. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1506. return dd->misc_err_status_cnt[9];
  1507. }
  1508. static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
  1509. void *context, int vl, int mode,
  1510. u64 data)
  1511. {
  1512. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1513. return dd->misc_err_status_cnt[8];
  1514. }
  1515. static u64 access_misc_efuse_read_bad_addr_err_cnt(
  1516. const struct cntr_entry *entry,
  1517. void *context, int vl, int mode, u64 data)
  1518. {
  1519. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1520. return dd->misc_err_status_cnt[7];
  1521. }
  1522. static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
  1523. void *context, int vl,
  1524. int mode, u64 data)
  1525. {
  1526. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1527. return dd->misc_err_status_cnt[6];
  1528. }
  1529. static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
  1530. void *context, int vl, int mode,
  1531. u64 data)
  1532. {
  1533. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1534. return dd->misc_err_status_cnt[5];
  1535. }
  1536. static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
  1537. void *context, int vl, int mode,
  1538. u64 data)
  1539. {
  1540. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1541. return dd->misc_err_status_cnt[4];
  1542. }
  1543. static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
  1544. void *context, int vl,
  1545. int mode, u64 data)
  1546. {
  1547. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1548. return dd->misc_err_status_cnt[3];
  1549. }
  1550. static u64 access_misc_csr_write_bad_addr_err_cnt(
  1551. const struct cntr_entry *entry,
  1552. void *context, int vl, int mode, u64 data)
  1553. {
  1554. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1555. return dd->misc_err_status_cnt[2];
  1556. }
  1557. static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1558. void *context, int vl,
  1559. int mode, u64 data)
  1560. {
  1561. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1562. return dd->misc_err_status_cnt[1];
  1563. }
  1564. static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
  1565. void *context, int vl, int mode,
  1566. u64 data)
  1567. {
  1568. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1569. return dd->misc_err_status_cnt[0];
  1570. }
  1571. /*
  1572. * Software counter for the aggregate of
  1573. * individual CceErrStatus counters
  1574. */
  1575. static u64 access_sw_cce_err_status_aggregated_cnt(
  1576. const struct cntr_entry *entry,
  1577. void *context, int vl, int mode, u64 data)
  1578. {
  1579. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1580. return dd->sw_cce_err_status_aggregate;
  1581. }
  1582. /*
  1583. * Software counters corresponding to each of the
  1584. * error status bits within CceErrStatus
  1585. */
  1586. static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
  1587. void *context, int vl, int mode,
  1588. u64 data)
  1589. {
  1590. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1591. return dd->cce_err_status_cnt[40];
  1592. }
  1593. static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
  1594. void *context, int vl, int mode,
  1595. u64 data)
  1596. {
  1597. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1598. return dd->cce_err_status_cnt[39];
  1599. }
  1600. static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
  1601. void *context, int vl, int mode,
  1602. u64 data)
  1603. {
  1604. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1605. return dd->cce_err_status_cnt[38];
  1606. }
  1607. static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
  1608. void *context, int vl, int mode,
  1609. u64 data)
  1610. {
  1611. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1612. return dd->cce_err_status_cnt[37];
  1613. }
  1614. static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
  1615. void *context, int vl, int mode,
  1616. u64 data)
  1617. {
  1618. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1619. return dd->cce_err_status_cnt[36];
  1620. }
  1621. static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
  1622. const struct cntr_entry *entry,
  1623. void *context, int vl, int mode, u64 data)
  1624. {
  1625. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1626. return dd->cce_err_status_cnt[35];
  1627. }
  1628. static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
  1629. const struct cntr_entry *entry,
  1630. void *context, int vl, int mode, u64 data)
  1631. {
  1632. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1633. return dd->cce_err_status_cnt[34];
  1634. }
  1635. static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1636. void *context, int vl,
  1637. int mode, u64 data)
  1638. {
  1639. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1640. return dd->cce_err_status_cnt[33];
  1641. }
  1642. static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1643. void *context, int vl, int mode,
  1644. u64 data)
  1645. {
  1646. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1647. return dd->cce_err_status_cnt[32];
  1648. }
  1649. static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
  1650. void *context, int vl, int mode, u64 data)
  1651. {
  1652. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1653. return dd->cce_err_status_cnt[31];
  1654. }
  1655. static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
  1656. void *context, int vl, int mode,
  1657. u64 data)
  1658. {
  1659. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1660. return dd->cce_err_status_cnt[30];
  1661. }
  1662. static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
  1663. void *context, int vl, int mode,
  1664. u64 data)
  1665. {
  1666. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1667. return dd->cce_err_status_cnt[29];
  1668. }
  1669. static u64 access_pcic_transmit_back_parity_err_cnt(
  1670. const struct cntr_entry *entry,
  1671. void *context, int vl, int mode, u64 data)
  1672. {
  1673. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1674. return dd->cce_err_status_cnt[28];
  1675. }
  1676. static u64 access_pcic_transmit_front_parity_err_cnt(
  1677. const struct cntr_entry *entry,
  1678. void *context, int vl, int mode, u64 data)
  1679. {
  1680. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1681. return dd->cce_err_status_cnt[27];
  1682. }
  1683. static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
  1684. void *context, int vl, int mode,
  1685. u64 data)
  1686. {
  1687. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1688. return dd->cce_err_status_cnt[26];
  1689. }
  1690. static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
  1691. void *context, int vl, int mode,
  1692. u64 data)
  1693. {
  1694. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1695. return dd->cce_err_status_cnt[25];
  1696. }
  1697. static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
  1698. void *context, int vl, int mode,
  1699. u64 data)
  1700. {
  1701. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1702. return dd->cce_err_status_cnt[24];
  1703. }
  1704. static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
  1705. void *context, int vl, int mode,
  1706. u64 data)
  1707. {
  1708. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1709. return dd->cce_err_status_cnt[23];
  1710. }
  1711. static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
  1712. void *context, int vl,
  1713. int mode, u64 data)
  1714. {
  1715. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1716. return dd->cce_err_status_cnt[22];
  1717. }
  1718. static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
  1719. void *context, int vl, int mode,
  1720. u64 data)
  1721. {
  1722. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1723. return dd->cce_err_status_cnt[21];
  1724. }
  1725. static u64 access_pcic_n_post_dat_q_parity_err_cnt(
  1726. const struct cntr_entry *entry,
  1727. void *context, int vl, int mode, u64 data)
  1728. {
  1729. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1730. return dd->cce_err_status_cnt[20];
  1731. }
  1732. static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
  1733. void *context, int vl,
  1734. int mode, u64 data)
  1735. {
  1736. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1737. return dd->cce_err_status_cnt[19];
  1738. }
  1739. static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
  1740. void *context, int vl, int mode,
  1741. u64 data)
  1742. {
  1743. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1744. return dd->cce_err_status_cnt[18];
  1745. }
  1746. static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
  1747. void *context, int vl, int mode,
  1748. u64 data)
  1749. {
  1750. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1751. return dd->cce_err_status_cnt[17];
  1752. }
  1753. static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
  1754. void *context, int vl, int mode,
  1755. u64 data)
  1756. {
  1757. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1758. return dd->cce_err_status_cnt[16];
  1759. }
  1760. static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
  1761. void *context, int vl, int mode,
  1762. u64 data)
  1763. {
  1764. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1765. return dd->cce_err_status_cnt[15];
  1766. }
  1767. static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
  1768. void *context, int vl,
  1769. int mode, u64 data)
  1770. {
  1771. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1772. return dd->cce_err_status_cnt[14];
  1773. }
  1774. static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
  1775. void *context, int vl, int mode,
  1776. u64 data)
  1777. {
  1778. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1779. return dd->cce_err_status_cnt[13];
  1780. }
  1781. static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
  1782. const struct cntr_entry *entry,
  1783. void *context, int vl, int mode, u64 data)
  1784. {
  1785. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1786. return dd->cce_err_status_cnt[12];
  1787. }
  1788. static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
  1789. const struct cntr_entry *entry,
  1790. void *context, int vl, int mode, u64 data)
  1791. {
  1792. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1793. return dd->cce_err_status_cnt[11];
  1794. }
  1795. static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
  1796. const struct cntr_entry *entry,
  1797. void *context, int vl, int mode, u64 data)
  1798. {
  1799. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1800. return dd->cce_err_status_cnt[10];
  1801. }
  1802. static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
  1803. const struct cntr_entry *entry,
  1804. void *context, int vl, int mode, u64 data)
  1805. {
  1806. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1807. return dd->cce_err_status_cnt[9];
  1808. }
  1809. static u64 access_cce_cli2_async_fifo_parity_err_cnt(
  1810. const struct cntr_entry *entry,
  1811. void *context, int vl, int mode, u64 data)
  1812. {
  1813. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1814. return dd->cce_err_status_cnt[8];
  1815. }
  1816. static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
  1817. void *context, int vl,
  1818. int mode, u64 data)
  1819. {
  1820. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1821. return dd->cce_err_status_cnt[7];
  1822. }
  1823. static u64 access_cce_cli0_async_fifo_parity_err_cnt(
  1824. const struct cntr_entry *entry,
  1825. void *context, int vl, int mode, u64 data)
  1826. {
  1827. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1828. return dd->cce_err_status_cnt[6];
  1829. }
  1830. static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
  1831. void *context, int vl, int mode,
  1832. u64 data)
  1833. {
  1834. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1835. return dd->cce_err_status_cnt[5];
  1836. }
  1837. static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
  1838. void *context, int vl, int mode,
  1839. u64 data)
  1840. {
  1841. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1842. return dd->cce_err_status_cnt[4];
  1843. }
  1844. static u64 access_cce_trgt_async_fifo_parity_err_cnt(
  1845. const struct cntr_entry *entry,
  1846. void *context, int vl, int mode, u64 data)
  1847. {
  1848. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1849. return dd->cce_err_status_cnt[3];
  1850. }
  1851. static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1852. void *context, int vl,
  1853. int mode, u64 data)
  1854. {
  1855. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1856. return dd->cce_err_status_cnt[2];
  1857. }
  1858. static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1859. void *context, int vl,
  1860. int mode, u64 data)
  1861. {
  1862. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1863. return dd->cce_err_status_cnt[1];
  1864. }
  1865. static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
  1866. void *context, int vl, int mode,
  1867. u64 data)
  1868. {
  1869. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1870. return dd->cce_err_status_cnt[0];
  1871. }
  1872. /*
  1873. * Software counters corresponding to each of the
  1874. * error status bits within RcvErrStatus
  1875. */
  1876. static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
  1877. void *context, int vl, int mode,
  1878. u64 data)
  1879. {
  1880. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1881. return dd->rcv_err_status_cnt[63];
  1882. }
  1883. static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1884. void *context, int vl,
  1885. int mode, u64 data)
  1886. {
  1887. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1888. return dd->rcv_err_status_cnt[62];
  1889. }
  1890. static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1891. void *context, int vl, int mode,
  1892. u64 data)
  1893. {
  1894. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1895. return dd->rcv_err_status_cnt[61];
  1896. }
  1897. static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
  1898. void *context, int vl, int mode,
  1899. u64 data)
  1900. {
  1901. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1902. return dd->rcv_err_status_cnt[60];
  1903. }
  1904. static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  1905. void *context, int vl,
  1906. int mode, u64 data)
  1907. {
  1908. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1909. return dd->rcv_err_status_cnt[59];
  1910. }
  1911. static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  1912. void *context, int vl,
  1913. int mode, u64 data)
  1914. {
  1915. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1916. return dd->rcv_err_status_cnt[58];
  1917. }
  1918. static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
  1919. void *context, int vl, int mode,
  1920. u64 data)
  1921. {
  1922. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1923. return dd->rcv_err_status_cnt[57];
  1924. }
  1925. static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
  1926. void *context, int vl, int mode,
  1927. u64 data)
  1928. {
  1929. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1930. return dd->rcv_err_status_cnt[56];
  1931. }
  1932. static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
  1933. void *context, int vl, int mode,
  1934. u64 data)
  1935. {
  1936. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1937. return dd->rcv_err_status_cnt[55];
  1938. }
  1939. static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
  1940. const struct cntr_entry *entry,
  1941. void *context, int vl, int mode, u64 data)
  1942. {
  1943. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1944. return dd->rcv_err_status_cnt[54];
  1945. }
  1946. static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
  1947. const struct cntr_entry *entry,
  1948. void *context, int vl, int mode, u64 data)
  1949. {
  1950. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1951. return dd->rcv_err_status_cnt[53];
  1952. }
  1953. static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
  1954. void *context, int vl,
  1955. int mode, u64 data)
  1956. {
  1957. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1958. return dd->rcv_err_status_cnt[52];
  1959. }
  1960. static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
  1961. void *context, int vl,
  1962. int mode, u64 data)
  1963. {
  1964. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1965. return dd->rcv_err_status_cnt[51];
  1966. }
  1967. static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
  1968. void *context, int vl,
  1969. int mode, u64 data)
  1970. {
  1971. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1972. return dd->rcv_err_status_cnt[50];
  1973. }
  1974. static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
  1975. void *context, int vl,
  1976. int mode, u64 data)
  1977. {
  1978. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1979. return dd->rcv_err_status_cnt[49];
  1980. }
  1981. static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
  1982. void *context, int vl,
  1983. int mode, u64 data)
  1984. {
  1985. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1986. return dd->rcv_err_status_cnt[48];
  1987. }
  1988. static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
  1989. void *context, int vl,
  1990. int mode, u64 data)
  1991. {
  1992. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1993. return dd->rcv_err_status_cnt[47];
  1994. }
  1995. static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
  1996. void *context, int vl, int mode,
  1997. u64 data)
  1998. {
  1999. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2000. return dd->rcv_err_status_cnt[46];
  2001. }
  2002. static u64 access_rx_hq_intr_csr_parity_err_cnt(
  2003. const struct cntr_entry *entry,
  2004. void *context, int vl, int mode, u64 data)
  2005. {
  2006. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2007. return dd->rcv_err_status_cnt[45];
  2008. }
  2009. static u64 access_rx_lookup_csr_parity_err_cnt(
  2010. const struct cntr_entry *entry,
  2011. void *context, int vl, int mode, u64 data)
  2012. {
  2013. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2014. return dd->rcv_err_status_cnt[44];
  2015. }
  2016. static u64 access_rx_lookup_rcv_array_cor_err_cnt(
  2017. const struct cntr_entry *entry,
  2018. void *context, int vl, int mode, u64 data)
  2019. {
  2020. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2021. return dd->rcv_err_status_cnt[43];
  2022. }
  2023. static u64 access_rx_lookup_rcv_array_unc_err_cnt(
  2024. const struct cntr_entry *entry,
  2025. void *context, int vl, int mode, u64 data)
  2026. {
  2027. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2028. return dd->rcv_err_status_cnt[42];
  2029. }
  2030. static u64 access_rx_lookup_des_part2_parity_err_cnt(
  2031. const struct cntr_entry *entry,
  2032. void *context, int vl, int mode, u64 data)
  2033. {
  2034. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2035. return dd->rcv_err_status_cnt[41];
  2036. }
  2037. static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
  2038. const struct cntr_entry *entry,
  2039. void *context, int vl, int mode, u64 data)
  2040. {
  2041. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2042. return dd->rcv_err_status_cnt[40];
  2043. }
  2044. static u64 access_rx_lookup_des_part1_unc_err_cnt(
  2045. const struct cntr_entry *entry,
  2046. void *context, int vl, int mode, u64 data)
  2047. {
  2048. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2049. return dd->rcv_err_status_cnt[39];
  2050. }
  2051. static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
  2052. const struct cntr_entry *entry,
  2053. void *context, int vl, int mode, u64 data)
  2054. {
  2055. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2056. return dd->rcv_err_status_cnt[38];
  2057. }
  2058. static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
  2059. const struct cntr_entry *entry,
  2060. void *context, int vl, int mode, u64 data)
  2061. {
  2062. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2063. return dd->rcv_err_status_cnt[37];
  2064. }
  2065. static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
  2066. const struct cntr_entry *entry,
  2067. void *context, int vl, int mode, u64 data)
  2068. {
  2069. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2070. return dd->rcv_err_status_cnt[36];
  2071. }
  2072. static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
  2073. const struct cntr_entry *entry,
  2074. void *context, int vl, int mode, u64 data)
  2075. {
  2076. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2077. return dd->rcv_err_status_cnt[35];
  2078. }
  2079. static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
  2080. const struct cntr_entry *entry,
  2081. void *context, int vl, int mode, u64 data)
  2082. {
  2083. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2084. return dd->rcv_err_status_cnt[34];
  2085. }
  2086. static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
  2087. const struct cntr_entry *entry,
  2088. void *context, int vl, int mode, u64 data)
  2089. {
  2090. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2091. return dd->rcv_err_status_cnt[33];
  2092. }
  2093. static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
  2094. void *context, int vl, int mode,
  2095. u64 data)
  2096. {
  2097. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2098. return dd->rcv_err_status_cnt[32];
  2099. }
  2100. static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
  2101. void *context, int vl, int mode,
  2102. u64 data)
  2103. {
  2104. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2105. return dd->rcv_err_status_cnt[31];
  2106. }
  2107. static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
  2108. void *context, int vl, int mode,
  2109. u64 data)
  2110. {
  2111. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2112. return dd->rcv_err_status_cnt[30];
  2113. }
  2114. static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
  2115. void *context, int vl, int mode,
  2116. u64 data)
  2117. {
  2118. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2119. return dd->rcv_err_status_cnt[29];
  2120. }
  2121. static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
  2122. void *context, int vl,
  2123. int mode, u64 data)
  2124. {
  2125. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2126. return dd->rcv_err_status_cnt[28];
  2127. }
  2128. static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
  2129. const struct cntr_entry *entry,
  2130. void *context, int vl, int mode, u64 data)
  2131. {
  2132. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2133. return dd->rcv_err_status_cnt[27];
  2134. }
  2135. static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
  2136. const struct cntr_entry *entry,
  2137. void *context, int vl, int mode, u64 data)
  2138. {
  2139. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2140. return dd->rcv_err_status_cnt[26];
  2141. }
  2142. static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
  2143. const struct cntr_entry *entry,
  2144. void *context, int vl, int mode, u64 data)
  2145. {
  2146. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2147. return dd->rcv_err_status_cnt[25];
  2148. }
  2149. static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
  2150. const struct cntr_entry *entry,
  2151. void *context, int vl, int mode, u64 data)
  2152. {
  2153. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2154. return dd->rcv_err_status_cnt[24];
  2155. }
  2156. static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
  2157. const struct cntr_entry *entry,
  2158. void *context, int vl, int mode, u64 data)
  2159. {
  2160. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2161. return dd->rcv_err_status_cnt[23];
  2162. }
  2163. static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
  2164. const struct cntr_entry *entry,
  2165. void *context, int vl, int mode, u64 data)
  2166. {
  2167. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2168. return dd->rcv_err_status_cnt[22];
  2169. }
  2170. static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
  2171. const struct cntr_entry *entry,
  2172. void *context, int vl, int mode, u64 data)
  2173. {
  2174. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2175. return dd->rcv_err_status_cnt[21];
  2176. }
  2177. static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
  2178. const struct cntr_entry *entry,
  2179. void *context, int vl, int mode, u64 data)
  2180. {
  2181. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2182. return dd->rcv_err_status_cnt[20];
  2183. }
  2184. static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
  2185. const struct cntr_entry *entry,
  2186. void *context, int vl, int mode, u64 data)
  2187. {
  2188. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2189. return dd->rcv_err_status_cnt[19];
  2190. }
  2191. static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
  2192. void *context, int vl,
  2193. int mode, u64 data)
  2194. {
  2195. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2196. return dd->rcv_err_status_cnt[18];
  2197. }
  2198. static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
  2199. void *context, int vl,
  2200. int mode, u64 data)
  2201. {
  2202. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2203. return dd->rcv_err_status_cnt[17];
  2204. }
  2205. static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
  2206. const struct cntr_entry *entry,
  2207. void *context, int vl, int mode, u64 data)
  2208. {
  2209. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2210. return dd->rcv_err_status_cnt[16];
  2211. }
  2212. static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
  2213. const struct cntr_entry *entry,
  2214. void *context, int vl, int mode, u64 data)
  2215. {
  2216. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2217. return dd->rcv_err_status_cnt[15];
  2218. }
  2219. static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
  2220. void *context, int vl,
  2221. int mode, u64 data)
  2222. {
  2223. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2224. return dd->rcv_err_status_cnt[14];
  2225. }
  2226. static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
  2227. void *context, int vl,
  2228. int mode, u64 data)
  2229. {
  2230. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2231. return dd->rcv_err_status_cnt[13];
  2232. }
  2233. static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  2234. void *context, int vl, int mode,
  2235. u64 data)
  2236. {
  2237. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2238. return dd->rcv_err_status_cnt[12];
  2239. }
  2240. static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
  2241. void *context, int vl, int mode,
  2242. u64 data)
  2243. {
  2244. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2245. return dd->rcv_err_status_cnt[11];
  2246. }
  2247. static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
  2248. void *context, int vl, int mode,
  2249. u64 data)
  2250. {
  2251. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2252. return dd->rcv_err_status_cnt[10];
  2253. }
  2254. static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
  2255. void *context, int vl, int mode,
  2256. u64 data)
  2257. {
  2258. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2259. return dd->rcv_err_status_cnt[9];
  2260. }
  2261. static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
  2262. void *context, int vl, int mode,
  2263. u64 data)
  2264. {
  2265. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2266. return dd->rcv_err_status_cnt[8];
  2267. }
  2268. static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
  2269. const struct cntr_entry *entry,
  2270. void *context, int vl, int mode, u64 data)
  2271. {
  2272. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2273. return dd->rcv_err_status_cnt[7];
  2274. }
  2275. static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
  2276. const struct cntr_entry *entry,
  2277. void *context, int vl, int mode, u64 data)
  2278. {
  2279. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2280. return dd->rcv_err_status_cnt[6];
  2281. }
  2282. static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
  2283. void *context, int vl, int mode,
  2284. u64 data)
  2285. {
  2286. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2287. return dd->rcv_err_status_cnt[5];
  2288. }
  2289. static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
  2290. void *context, int vl, int mode,
  2291. u64 data)
  2292. {
  2293. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2294. return dd->rcv_err_status_cnt[4];
  2295. }
  2296. static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
  2297. void *context, int vl, int mode,
  2298. u64 data)
  2299. {
  2300. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2301. return dd->rcv_err_status_cnt[3];
  2302. }
  2303. static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
  2304. void *context, int vl, int mode,
  2305. u64 data)
  2306. {
  2307. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2308. return dd->rcv_err_status_cnt[2];
  2309. }
  2310. static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
  2311. void *context, int vl, int mode,
  2312. u64 data)
  2313. {
  2314. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2315. return dd->rcv_err_status_cnt[1];
  2316. }
  2317. static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
  2318. void *context, int vl, int mode,
  2319. u64 data)
  2320. {
  2321. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2322. return dd->rcv_err_status_cnt[0];
  2323. }
  2324. /*
  2325. * Software counters corresponding to each of the
  2326. * error status bits within SendPioErrStatus
  2327. */
  2328. static u64 access_pio_pec_sop_head_parity_err_cnt(
  2329. const struct cntr_entry *entry,
  2330. void *context, int vl, int mode, u64 data)
  2331. {
  2332. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2333. return dd->send_pio_err_status_cnt[35];
  2334. }
  2335. static u64 access_pio_pcc_sop_head_parity_err_cnt(
  2336. const struct cntr_entry *entry,
  2337. void *context, int vl, int mode, u64 data)
  2338. {
  2339. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2340. return dd->send_pio_err_status_cnt[34];
  2341. }
  2342. static u64 access_pio_last_returned_cnt_parity_err_cnt(
  2343. const struct cntr_entry *entry,
  2344. void *context, int vl, int mode, u64 data)
  2345. {
  2346. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2347. return dd->send_pio_err_status_cnt[33];
  2348. }
  2349. static u64 access_pio_current_free_cnt_parity_err_cnt(
  2350. const struct cntr_entry *entry,
  2351. void *context, int vl, int mode, u64 data)
  2352. {
  2353. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2354. return dd->send_pio_err_status_cnt[32];
  2355. }
  2356. static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
  2357. void *context, int vl, int mode,
  2358. u64 data)
  2359. {
  2360. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2361. return dd->send_pio_err_status_cnt[31];
  2362. }
  2363. static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
  2364. void *context, int vl, int mode,
  2365. u64 data)
  2366. {
  2367. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2368. return dd->send_pio_err_status_cnt[30];
  2369. }
  2370. static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
  2371. void *context, int vl, int mode,
  2372. u64 data)
  2373. {
  2374. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2375. return dd->send_pio_err_status_cnt[29];
  2376. }
  2377. static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
  2378. const struct cntr_entry *entry,
  2379. void *context, int vl, int mode, u64 data)
  2380. {
  2381. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2382. return dd->send_pio_err_status_cnt[28];
  2383. }
  2384. static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
  2385. void *context, int vl, int mode,
  2386. u64 data)
  2387. {
  2388. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2389. return dd->send_pio_err_status_cnt[27];
  2390. }
  2391. static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
  2392. void *context, int vl, int mode,
  2393. u64 data)
  2394. {
  2395. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2396. return dd->send_pio_err_status_cnt[26];
  2397. }
  2398. static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
  2399. void *context, int vl,
  2400. int mode, u64 data)
  2401. {
  2402. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2403. return dd->send_pio_err_status_cnt[25];
  2404. }
  2405. static u64 access_pio_block_qw_count_parity_err_cnt(
  2406. const struct cntr_entry *entry,
  2407. void *context, int vl, int mode, u64 data)
  2408. {
  2409. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2410. return dd->send_pio_err_status_cnt[24];
  2411. }
  2412. static u64 access_pio_write_qw_valid_parity_err_cnt(
  2413. const struct cntr_entry *entry,
  2414. void *context, int vl, int mode, u64 data)
  2415. {
  2416. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2417. return dd->send_pio_err_status_cnt[23];
  2418. }
  2419. static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
  2420. void *context, int vl, int mode,
  2421. u64 data)
  2422. {
  2423. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2424. return dd->send_pio_err_status_cnt[22];
  2425. }
  2426. static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
  2427. void *context, int vl,
  2428. int mode, u64 data)
  2429. {
  2430. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2431. return dd->send_pio_err_status_cnt[21];
  2432. }
  2433. static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
  2434. void *context, int vl,
  2435. int mode, u64 data)
  2436. {
  2437. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2438. return dd->send_pio_err_status_cnt[20];
  2439. }
  2440. static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
  2441. void *context, int vl,
  2442. int mode, u64 data)
  2443. {
  2444. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2445. return dd->send_pio_err_status_cnt[19];
  2446. }
  2447. static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
  2448. const struct cntr_entry *entry,
  2449. void *context, int vl, int mode, u64 data)
  2450. {
  2451. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2452. return dd->send_pio_err_status_cnt[18];
  2453. }
  2454. static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
  2455. void *context, int vl, int mode,
  2456. u64 data)
  2457. {
  2458. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2459. return dd->send_pio_err_status_cnt[17];
  2460. }
  2461. static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
  2462. void *context, int vl, int mode,
  2463. u64 data)
  2464. {
  2465. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2466. return dd->send_pio_err_status_cnt[16];
  2467. }
  2468. static u64 access_pio_credit_ret_fifo_parity_err_cnt(
  2469. const struct cntr_entry *entry,
  2470. void *context, int vl, int mode, u64 data)
  2471. {
  2472. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2473. return dd->send_pio_err_status_cnt[15];
  2474. }
  2475. static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
  2476. const struct cntr_entry *entry,
  2477. void *context, int vl, int mode, u64 data)
  2478. {
  2479. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2480. return dd->send_pio_err_status_cnt[14];
  2481. }
  2482. static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
  2483. const struct cntr_entry *entry,
  2484. void *context, int vl, int mode, u64 data)
  2485. {
  2486. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2487. return dd->send_pio_err_status_cnt[13];
  2488. }
  2489. static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
  2490. const struct cntr_entry *entry,
  2491. void *context, int vl, int mode, u64 data)
  2492. {
  2493. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2494. return dd->send_pio_err_status_cnt[12];
  2495. }
  2496. static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
  2497. const struct cntr_entry *entry,
  2498. void *context, int vl, int mode, u64 data)
  2499. {
  2500. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2501. return dd->send_pio_err_status_cnt[11];
  2502. }
  2503. static u64 access_pio_sm_pkt_reset_parity_err_cnt(
  2504. const struct cntr_entry *entry,
  2505. void *context, int vl, int mode, u64 data)
  2506. {
  2507. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2508. return dd->send_pio_err_status_cnt[10];
  2509. }
  2510. static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
  2511. const struct cntr_entry *entry,
  2512. void *context, int vl, int mode, u64 data)
  2513. {
  2514. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2515. return dd->send_pio_err_status_cnt[9];
  2516. }
  2517. static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
  2518. const struct cntr_entry *entry,
  2519. void *context, int vl, int mode, u64 data)
  2520. {
  2521. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2522. return dd->send_pio_err_status_cnt[8];
  2523. }
  2524. static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
  2525. const struct cntr_entry *entry,
  2526. void *context, int vl, int mode, u64 data)
  2527. {
  2528. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2529. return dd->send_pio_err_status_cnt[7];
  2530. }
  2531. static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
  2532. void *context, int vl, int mode,
  2533. u64 data)
  2534. {
  2535. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2536. return dd->send_pio_err_status_cnt[6];
  2537. }
  2538. static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
  2539. void *context, int vl, int mode,
  2540. u64 data)
  2541. {
  2542. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2543. return dd->send_pio_err_status_cnt[5];
  2544. }
  2545. static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
  2546. void *context, int vl, int mode,
  2547. u64 data)
  2548. {
  2549. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2550. return dd->send_pio_err_status_cnt[4];
  2551. }
  2552. static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
  2553. void *context, int vl, int mode,
  2554. u64 data)
  2555. {
  2556. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2557. return dd->send_pio_err_status_cnt[3];
  2558. }
  2559. static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
  2560. void *context, int vl, int mode,
  2561. u64 data)
  2562. {
  2563. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2564. return dd->send_pio_err_status_cnt[2];
  2565. }
  2566. static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
  2567. void *context, int vl,
  2568. int mode, u64 data)
  2569. {
  2570. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2571. return dd->send_pio_err_status_cnt[1];
  2572. }
  2573. static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
  2574. void *context, int vl, int mode,
  2575. u64 data)
  2576. {
  2577. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2578. return dd->send_pio_err_status_cnt[0];
  2579. }
  2580. /*
  2581. * Software counters corresponding to each of the
  2582. * error status bits within SendDmaErrStatus
  2583. */
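/*
 * Note on the pattern (editorial): as with the PIO error accessors above,
 * each accessor below simply returns the software shadow counter whose
 * array index matches the bit position of the corresponding error in the
 * hardware error-status register (e.g. bit 3 -> send_dma_err_status_cnt[3]).
 */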
  2584. static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
  2585. const struct cntr_entry *entry,
  2586. void *context, int vl, int mode, u64 data)
  2587. {
  2588. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2589. return dd->send_dma_err_status_cnt[3];
  2590. }
  2591. static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
  2592. const struct cntr_entry *entry,
  2593. void *context, int vl, int mode, u64 data)
  2594. {
  2595. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2596. return dd->send_dma_err_status_cnt[2];
  2597. }
  2598. static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
  2599. void *context, int vl, int mode,
  2600. u64 data)
  2601. {
  2602. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2603. return dd->send_dma_err_status_cnt[1];
  2604. }
  2605. static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
  2606. void *context, int vl, int mode,
  2607. u64 data)
  2608. {
  2609. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2610. return dd->send_dma_err_status_cnt[0];
  2611. }
  2612. /*
  2613. * Software counters corresponding to each of the
  2614. * error status bits within SendEgressErrStatus
  2615. */
  2616. static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
  2617. const struct cntr_entry *entry,
  2618. void *context, int vl, int mode, u64 data)
  2619. {
  2620. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2621. return dd->send_egress_err_status_cnt[63];
  2622. }
  2623. static u64 access_tx_read_sdma_memory_csr_err_cnt(
  2624. const struct cntr_entry *entry,
  2625. void *context, int vl, int mode, u64 data)
  2626. {
  2627. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2628. return dd->send_egress_err_status_cnt[62];
  2629. }
  2630. static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
  2631. void *context, int vl, int mode,
  2632. u64 data)
  2633. {
  2634. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2635. return dd->send_egress_err_status_cnt[61];
  2636. }
  2637. static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
  2638. void *context, int vl,
  2639. int mode, u64 data)
  2640. {
  2641. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2642. return dd->send_egress_err_status_cnt[60];
  2643. }
  2644. static u64 access_tx_read_sdma_memory_cor_err_cnt(
  2645. const struct cntr_entry *entry,
  2646. void *context, int vl, int mode, u64 data)
  2647. {
  2648. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2649. return dd->send_egress_err_status_cnt[59];
  2650. }
  2651. static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
  2652. void *context, int vl, int mode,
  2653. u64 data)
  2654. {
  2655. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2656. return dd->send_egress_err_status_cnt[58];
  2657. }
  2658. static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
  2659. void *context, int vl, int mode,
  2660. u64 data)
  2661. {
  2662. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2663. return dd->send_egress_err_status_cnt[57];
  2664. }
  2665. static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
  2666. void *context, int vl, int mode,
  2667. u64 data)
  2668. {
  2669. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2670. return dd->send_egress_err_status_cnt[56];
  2671. }
  2672. static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
  2673. void *context, int vl, int mode,
  2674. u64 data)
  2675. {
  2676. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2677. return dd->send_egress_err_status_cnt[55];
  2678. }
  2679. static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
  2680. void *context, int vl, int mode,
  2681. u64 data)
  2682. {
  2683. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2684. return dd->send_egress_err_status_cnt[54];
  2685. }
  2686. static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
  2687. void *context, int vl, int mode,
  2688. u64 data)
  2689. {
  2690. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2691. return dd->send_egress_err_status_cnt[53];
  2692. }
  2693. static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
  2694. void *context, int vl, int mode,
  2695. u64 data)
  2696. {
  2697. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2698. return dd->send_egress_err_status_cnt[52];
  2699. }
  2700. static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
  2701. void *context, int vl, int mode,
  2702. u64 data)
  2703. {
  2704. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2705. return dd->send_egress_err_status_cnt[51];
  2706. }
  2707. static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
  2708. void *context, int vl, int mode,
  2709. u64 data)
  2710. {
  2711. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2712. return dd->send_egress_err_status_cnt[50];
  2713. }
  2714. static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
  2715. void *context, int vl, int mode,
  2716. u64 data)
  2717. {
  2718. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2719. return dd->send_egress_err_status_cnt[49];
  2720. }
  2721. static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
  2722. void *context, int vl, int mode,
  2723. u64 data)
  2724. {
  2725. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2726. return dd->send_egress_err_status_cnt[48];
  2727. }
  2728. static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
  2729. void *context, int vl, int mode,
  2730. u64 data)
  2731. {
  2732. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2733. return dd->send_egress_err_status_cnt[47];
  2734. }
  2735. static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
  2736. void *context, int vl, int mode,
  2737. u64 data)
  2738. {
  2739. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2740. return dd->send_egress_err_status_cnt[46];
  2741. }
  2742. static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
  2743. void *context, int vl, int mode,
  2744. u64 data)
  2745. {
  2746. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2747. return dd->send_egress_err_status_cnt[45];
  2748. }
  2749. static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
  2750. void *context, int vl,
  2751. int mode, u64 data)
  2752. {
  2753. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2754. return dd->send_egress_err_status_cnt[44];
  2755. }
  2756. static u64 access_tx_read_sdma_memory_unc_err_cnt(
  2757. const struct cntr_entry *entry,
  2758. void *context, int vl, int mode, u64 data)
  2759. {
  2760. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2761. return dd->send_egress_err_status_cnt[43];
  2762. }
  2763. static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
  2764. void *context, int vl, int mode,
  2765. u64 data)
  2766. {
  2767. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2768. return dd->send_egress_err_status_cnt[42];
  2769. }
  2770. static u64 access_tx_credit_return_partiy_err_cnt(
  2771. const struct cntr_entry *entry,
  2772. void *context, int vl, int mode, u64 data)
  2773. {
  2774. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2775. return dd->send_egress_err_status_cnt[41];
  2776. }
  2777. static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
  2778. const struct cntr_entry *entry,
  2779. void *context, int vl, int mode, u64 data)
  2780. {
  2781. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2782. return dd->send_egress_err_status_cnt[40];
  2783. }
  2784. static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
  2785. const struct cntr_entry *entry,
  2786. void *context, int vl, int mode, u64 data)
  2787. {
  2788. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2789. return dd->send_egress_err_status_cnt[39];
  2790. }
  2791. static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
  2792. const struct cntr_entry *entry,
  2793. void *context, int vl, int mode, u64 data)
  2794. {
  2795. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2796. return dd->send_egress_err_status_cnt[38];
  2797. }
  2798. static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
  2799. const struct cntr_entry *entry,
  2800. void *context, int vl, int mode, u64 data)
  2801. {
  2802. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2803. return dd->send_egress_err_status_cnt[37];
  2804. }
  2805. static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
  2806. const struct cntr_entry *entry,
  2807. void *context, int vl, int mode, u64 data)
  2808. {
  2809. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2810. return dd->send_egress_err_status_cnt[36];
  2811. }
  2812. static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
  2813. const struct cntr_entry *entry,
  2814. void *context, int vl, int mode, u64 data)
  2815. {
  2816. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2817. return dd->send_egress_err_status_cnt[35];
  2818. }
  2819. static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
  2820. const struct cntr_entry *entry,
  2821. void *context, int vl, int mode, u64 data)
  2822. {
  2823. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2824. return dd->send_egress_err_status_cnt[34];
  2825. }
  2826. static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
  2827. const struct cntr_entry *entry,
  2828. void *context, int vl, int mode, u64 data)
  2829. {
  2830. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2831. return dd->send_egress_err_status_cnt[33];
  2832. }
  2833. static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
  2834. const struct cntr_entry *entry,
  2835. void *context, int vl, int mode, u64 data)
  2836. {
  2837. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2838. return dd->send_egress_err_status_cnt[32];
  2839. }
  2840. static u64 access_tx_sdma15_disallowed_packet_err_cnt(
  2841. const struct cntr_entry *entry,
  2842. void *context, int vl, int mode, u64 data)
  2843. {
  2844. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2845. return dd->send_egress_err_status_cnt[31];
  2846. }
  2847. static u64 access_tx_sdma14_disallowed_packet_err_cnt(
  2848. const struct cntr_entry *entry,
  2849. void *context, int vl, int mode, u64 data)
  2850. {
  2851. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2852. return dd->send_egress_err_status_cnt[30];
  2853. }
  2854. static u64 access_tx_sdma13_disallowed_packet_err_cnt(
  2855. const struct cntr_entry *entry,
  2856. void *context, int vl, int mode, u64 data)
  2857. {
  2858. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2859. return dd->send_egress_err_status_cnt[29];
  2860. }
  2861. static u64 access_tx_sdma12_disallowed_packet_err_cnt(
  2862. const struct cntr_entry *entry,
  2863. void *context, int vl, int mode, u64 data)
  2864. {
  2865. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2866. return dd->send_egress_err_status_cnt[28];
  2867. }
  2868. static u64 access_tx_sdma11_disallowed_packet_err_cnt(
  2869. const struct cntr_entry *entry,
  2870. void *context, int vl, int mode, u64 data)
  2871. {
  2872. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2873. return dd->send_egress_err_status_cnt[27];
  2874. }
  2875. static u64 access_tx_sdma10_disallowed_packet_err_cnt(
  2876. const struct cntr_entry *entry,
  2877. void *context, int vl, int mode, u64 data)
  2878. {
  2879. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2880. return dd->send_egress_err_status_cnt[26];
  2881. }
  2882. static u64 access_tx_sdma9_disallowed_packet_err_cnt(
  2883. const struct cntr_entry *entry,
  2884. void *context, int vl, int mode, u64 data)
  2885. {
  2886. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2887. return dd->send_egress_err_status_cnt[25];
  2888. }
  2889. static u64 access_tx_sdma8_disallowed_packet_err_cnt(
  2890. const struct cntr_entry *entry,
  2891. void *context, int vl, int mode, u64 data)
  2892. {
  2893. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2894. return dd->send_egress_err_status_cnt[24];
  2895. }
  2896. static u64 access_tx_sdma7_disallowed_packet_err_cnt(
  2897. const struct cntr_entry *entry,
  2898. void *context, int vl, int mode, u64 data)
  2899. {
  2900. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2901. return dd->send_egress_err_status_cnt[23];
  2902. }
  2903. static u64 access_tx_sdma6_disallowed_packet_err_cnt(
  2904. const struct cntr_entry *entry,
  2905. void *context, int vl, int mode, u64 data)
  2906. {
  2907. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2908. return dd->send_egress_err_status_cnt[22];
  2909. }
  2910. static u64 access_tx_sdma5_disallowed_packet_err_cnt(
  2911. const struct cntr_entry *entry,
  2912. void *context, int vl, int mode, u64 data)
  2913. {
  2914. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2915. return dd->send_egress_err_status_cnt[21];
  2916. }
  2917. static u64 access_tx_sdma4_disallowed_packet_err_cnt(
  2918. const struct cntr_entry *entry,
  2919. void *context, int vl, int mode, u64 data)
  2920. {
  2921. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2922. return dd->send_egress_err_status_cnt[20];
  2923. }
  2924. static u64 access_tx_sdma3_disallowed_packet_err_cnt(
  2925. const struct cntr_entry *entry,
  2926. void *context, int vl, int mode, u64 data)
  2927. {
  2928. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2929. return dd->send_egress_err_status_cnt[19];
  2930. }
  2931. static u64 access_tx_sdma2_disallowed_packet_err_cnt(
  2932. const struct cntr_entry *entry,
  2933. void *context, int vl, int mode, u64 data)
  2934. {
  2935. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2936. return dd->send_egress_err_status_cnt[18];
  2937. }
  2938. static u64 access_tx_sdma1_disallowed_packet_err_cnt(
  2939. const struct cntr_entry *entry,
  2940. void *context, int vl, int mode, u64 data)
  2941. {
  2942. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2943. return dd->send_egress_err_status_cnt[17];
  2944. }
  2945. static u64 access_tx_sdma0_disallowed_packet_err_cnt(
  2946. const struct cntr_entry *entry,
  2947. void *context, int vl, int mode, u64 data)
  2948. {
  2949. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2950. return dd->send_egress_err_status_cnt[16];
  2951. }
  2952. static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
  2953. void *context, int vl, int mode,
  2954. u64 data)
  2955. {
  2956. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2957. return dd->send_egress_err_status_cnt[15];
  2958. }
  2959. static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
  2960. void *context, int vl,
  2961. int mode, u64 data)
  2962. {
  2963. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2964. return dd->send_egress_err_status_cnt[14];
  2965. }
  2966. static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
  2967. void *context, int vl, int mode,
  2968. u64 data)
  2969. {
  2970. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2971. return dd->send_egress_err_status_cnt[13];
  2972. }
  2973. static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
  2974. void *context, int vl, int mode,
  2975. u64 data)
  2976. {
  2977. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2978. return dd->send_egress_err_status_cnt[12];
  2979. }
  2980. static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
  2981. const struct cntr_entry *entry,
  2982. void *context, int vl, int mode, u64 data)
  2983. {
  2984. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2985. return dd->send_egress_err_status_cnt[11];
  2986. }
  2987. static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
  2988. void *context, int vl, int mode,
  2989. u64 data)
  2990. {
  2991. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2992. return dd->send_egress_err_status_cnt[10];
  2993. }
  2994. static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
  2995. void *context, int vl, int mode,
  2996. u64 data)
  2997. {
  2998. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2999. return dd->send_egress_err_status_cnt[9];
  3000. }
  3001. static u64 access_tx_sdma_launch_intf_parity_err_cnt(
  3002. const struct cntr_entry *entry,
  3003. void *context, int vl, int mode, u64 data)
  3004. {
  3005. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3006. return dd->send_egress_err_status_cnt[8];
  3007. }
  3008. static u64 access_tx_pio_launch_intf_parity_err_cnt(
  3009. const struct cntr_entry *entry,
  3010. void *context, int vl, int mode, u64 data)
  3011. {
  3012. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3013. return dd->send_egress_err_status_cnt[7];
  3014. }
  3015. static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
  3016. void *context, int vl, int mode,
  3017. u64 data)
  3018. {
  3019. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3020. return dd->send_egress_err_status_cnt[6];
  3021. }
  3022. static u64 access_tx_incorrect_link_state_err_cnt(
  3023. const struct cntr_entry *entry,
  3024. void *context, int vl, int mode, u64 data)
  3025. {
  3026. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3027. return dd->send_egress_err_status_cnt[5];
  3028. }
  3029. static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
  3030. void *context, int vl, int mode,
  3031. u64 data)
  3032. {
  3033. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3034. return dd->send_egress_err_status_cnt[4];
  3035. }
  3036. static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
  3037. const struct cntr_entry *entry,
  3038. void *context, int vl, int mode, u64 data)
  3039. {
  3040. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3041. return dd->send_egress_err_status_cnt[3];
  3042. }
  3043. static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
  3044. void *context, int vl, int mode,
  3045. u64 data)
  3046. {
  3047. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3048. return dd->send_egress_err_status_cnt[2];
  3049. }
  3050. static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
  3051. const struct cntr_entry *entry,
  3052. void *context, int vl, int mode, u64 data)
  3053. {
  3054. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3055. return dd->send_egress_err_status_cnt[1];
  3056. }
  3057. static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
  3058. const struct cntr_entry *entry,
  3059. void *context, int vl, int mode, u64 data)
  3060. {
  3061. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3062. return dd->send_egress_err_status_cnt[0];
  3063. }
  3064. /*
  3065. * Software counters corresponding to each of the
  3066. * error status bits within SendErrStatus
  3067. */
  3068. static u64 access_send_csr_write_bad_addr_err_cnt(
  3069. const struct cntr_entry *entry,
  3070. void *context, int vl, int mode, u64 data)
  3071. {
  3072. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3073. return dd->send_err_status_cnt[2];
  3074. }
  3075. static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  3076. void *context, int vl,
  3077. int mode, u64 data)
  3078. {
  3079. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3080. return dd->send_err_status_cnt[1];
  3081. }
  3082. static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
  3083. void *context, int vl, int mode,
  3084. u64 data)
  3085. {
  3086. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3087. return dd->send_err_status_cnt[0];
  3088. }
  3089. /*
  3090. * Software counters corresponding to each of the
  3091. * error status bits within SendCtxtErrStatus
  3092. */
  3093. static u64 access_pio_write_out_of_bounds_err_cnt(
  3094. const struct cntr_entry *entry,
  3095. void *context, int vl, int mode, u64 data)
  3096. {
  3097. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3098. return dd->sw_ctxt_err_status_cnt[4];
  3099. }
  3100. static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
  3101. void *context, int vl, int mode,
  3102. u64 data)
  3103. {
  3104. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3105. return dd->sw_ctxt_err_status_cnt[3];
  3106. }
  3107. static u64 access_pio_write_crosses_boundary_err_cnt(
  3108. const struct cntr_entry *entry,
  3109. void *context, int vl, int mode, u64 data)
  3110. {
  3111. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3112. return dd->sw_ctxt_err_status_cnt[2];
  3113. }
  3114. static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
  3115. void *context, int vl,
  3116. int mode, u64 data)
  3117. {
  3118. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3119. return dd->sw_ctxt_err_status_cnt[1];
  3120. }
  3121. static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
  3122. void *context, int vl, int mode,
  3123. u64 data)
  3124. {
  3125. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3126. return dd->sw_ctxt_err_status_cnt[0];
  3127. }
  3128. /*
  3129. * Software counters corresponding to each of the
  3130. * error status bits within SendDmaEngErrStatus
  3131. */
  3132. static u64 access_sdma_header_request_fifo_cor_err_cnt(
  3133. const struct cntr_entry *entry,
  3134. void *context, int vl, int mode, u64 data)
  3135. {
  3136. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3137. return dd->sw_send_dma_eng_err_status_cnt[23];
  3138. }
  3139. static u64 access_sdma_header_storage_cor_err_cnt(
  3140. const struct cntr_entry *entry,
  3141. void *context, int vl, int mode, u64 data)
  3142. {
  3143. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3144. return dd->sw_send_dma_eng_err_status_cnt[22];
  3145. }
  3146. static u64 access_sdma_packet_tracking_cor_err_cnt(
  3147. const struct cntr_entry *entry,
  3148. void *context, int vl, int mode, u64 data)
  3149. {
  3150. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3151. return dd->sw_send_dma_eng_err_status_cnt[21];
  3152. }
  3153. static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
  3154. void *context, int vl, int mode,
  3155. u64 data)
  3156. {
  3157. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3158. return dd->sw_send_dma_eng_err_status_cnt[20];
  3159. }
  3160. static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
  3161. void *context, int vl, int mode,
  3162. u64 data)
  3163. {
  3164. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3165. return dd->sw_send_dma_eng_err_status_cnt[19];
  3166. }
  3167. static u64 access_sdma_header_request_fifo_unc_err_cnt(
  3168. const struct cntr_entry *entry,
  3169. void *context, int vl, int mode, u64 data)
  3170. {
  3171. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3172. return dd->sw_send_dma_eng_err_status_cnt[18];
  3173. }
  3174. static u64 access_sdma_header_storage_unc_err_cnt(
  3175. const struct cntr_entry *entry,
  3176. void *context, int vl, int mode, u64 data)
  3177. {
  3178. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3179. return dd->sw_send_dma_eng_err_status_cnt[17];
  3180. }
  3181. static u64 access_sdma_packet_tracking_unc_err_cnt(
  3182. const struct cntr_entry *entry,
  3183. void *context, int vl, int mode, u64 data)
  3184. {
  3185. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3186. return dd->sw_send_dma_eng_err_status_cnt[16];
  3187. }
  3188. static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
  3189. void *context, int vl, int mode,
  3190. u64 data)
  3191. {
  3192. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3193. return dd->sw_send_dma_eng_err_status_cnt[15];
  3194. }
  3195. static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
  3196. void *context, int vl, int mode,
  3197. u64 data)
  3198. {
  3199. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3200. return dd->sw_send_dma_eng_err_status_cnt[14];
  3201. }
  3202. static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
  3203. void *context, int vl, int mode,
  3204. u64 data)
  3205. {
  3206. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3207. return dd->sw_send_dma_eng_err_status_cnt[13];
  3208. }
  3209. static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
  3210. void *context, int vl, int mode,
  3211. u64 data)
  3212. {
  3213. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3214. return dd->sw_send_dma_eng_err_status_cnt[12];
  3215. }
  3216. static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
  3217. void *context, int vl, int mode,
  3218. u64 data)
  3219. {
  3220. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3221. return dd->sw_send_dma_eng_err_status_cnt[11];
  3222. }
  3223. static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
  3224. void *context, int vl, int mode,
  3225. u64 data)
  3226. {
  3227. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3228. return dd->sw_send_dma_eng_err_status_cnt[10];
  3229. }
  3230. static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
  3231. void *context, int vl, int mode,
  3232. u64 data)
  3233. {
  3234. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3235. return dd->sw_send_dma_eng_err_status_cnt[9];
  3236. }
  3237. static u64 access_sdma_packet_desc_overflow_err_cnt(
  3238. const struct cntr_entry *entry,
  3239. void *context, int vl, int mode, u64 data)
  3240. {
  3241. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3242. return dd->sw_send_dma_eng_err_status_cnt[8];
  3243. }
  3244. static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
  3245. void *context, int vl,
  3246. int mode, u64 data)
  3247. {
  3248. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3249. return dd->sw_send_dma_eng_err_status_cnt[7];
  3250. }
  3251. static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
  3252. void *context, int vl, int mode, u64 data)
  3253. {
  3254. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3255. return dd->sw_send_dma_eng_err_status_cnt[6];
  3256. }
  3257. static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
  3258. void *context, int vl, int mode,
  3259. u64 data)
  3260. {
  3261. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3262. return dd->sw_send_dma_eng_err_status_cnt[5];
  3263. }
  3264. static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
  3265. void *context, int vl, int mode,
  3266. u64 data)
  3267. {
  3268. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3269. return dd->sw_send_dma_eng_err_status_cnt[4];
  3270. }
  3271. static u64 access_sdma_tail_out_of_bounds_err_cnt(
  3272. const struct cntr_entry *entry,
  3273. void *context, int vl, int mode, u64 data)
  3274. {
  3275. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3276. return dd->sw_send_dma_eng_err_status_cnt[3];
  3277. }
  3278. static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
  3279. void *context, int vl, int mode,
  3280. u64 data)
  3281. {
  3282. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3283. return dd->sw_send_dma_eng_err_status_cnt[2];
  3284. }
  3285. static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
  3286. void *context, int vl, int mode,
  3287. u64 data)
  3288. {
  3289. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3290. return dd->sw_send_dma_eng_err_status_cnt[1];
  3291. }
  3292. static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
  3293. void *context, int vl, int mode,
  3294. u64 data)
  3295. {
  3296. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3297. return dd->sw_send_dma_eng_err_status_cnt[0];
  3298. }
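/*
 * DCC portrcv error count. Unlike the shadow-counter accessors above,
 * this one reads the hardware CSR and, on a read, folds in the
 * software-counted bypass packet errors (saturating at CNTR_MAX); a
 * write clears the software count.
 */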
  3299. static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
  3300. void *context, int vl, int mode,
  3301. u64 data)
  3302. {
  3303. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3304. u64 val = 0;
  3305. u64 csr = entry->csr;
  3306. val = read_write_csr(dd, csr, mode, data);
  3307. if (mode == CNTR_MODE_R) {
  3308. val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
  3309. CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
  3310. } else if (mode == CNTR_MODE_W) {
  3311. dd->sw_rcv_bypass_packet_errors = 0;
  3312. } else {
  3313. dd_dev_err(dd, "Invalid cntr register access mode");
  3314. return 0;
  3315. }
  3316. return val;
  3317. }
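/*
 * Generate per-VL software counter accessors backed by per-cpu data in
 * ibport_data.rvp. For example, def_access_sw_cpu(rc_acks) below expands
 * to access_sw_cpu_rc_acks(), which forwards to read_write_cpu() with
 * &ppd->ibport_data.rvp.z_rc_acks and the rc_acks per-cpu counter.
 */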
  3318. #define def_access_sw_cpu(cntr) \
  3319. static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
  3320. void *context, int vl, int mode, u64 data) \
  3321. { \
  3322. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
  3323. return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
  3324. ppd->ibport_data.rvp.cntr, vl, \
  3325. mode, data); \
  3326. }
  3327. def_access_sw_cpu(rc_acks);
  3328. def_access_sw_cpu(rc_qacks);
  3329. def_access_sw_cpu(rc_delayed_comp);
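/*
 * Generate simple per-port software counters (ibport_data.rvp.n_<cntr>)
 * accessed via read_write_sw(). These are not per-VL counters, so a
 * request for a specific VL returns 0.
 */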
  3330. #define def_access_ibp_counter(cntr) \
  3331. static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
  3332. void *context, int vl, int mode, u64 data) \
  3333. { \
  3334. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
  3335. \
  3336. if (vl != CNTR_INVALID_VL) \
  3337. return 0; \
  3338. \
  3339. return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
  3340. mode, data); \
  3341. }
  3342. def_access_ibp_counter(loop_pkts);
  3343. def_access_ibp_counter(rc_resends);
  3344. def_access_ibp_counter(rnr_naks);
  3345. def_access_ibp_counter(other_naks);
  3346. def_access_ibp_counter(rc_timeouts);
  3347. def_access_ibp_counter(pkt_drops);
  3348. def_access_ibp_counter(dmawait);
  3349. def_access_ibp_counter(rc_seqnak);
  3350. def_access_ibp_counter(rc_dupreq);
  3351. def_access_ibp_counter(rdma_seq);
  3352. def_access_ibp_counter(unaligned);
  3353. def_access_ibp_counter(seq_naks);
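/*
 * Device counter table, indexed by the C_* counter enum. Each entry is
 * built with one of the *_CNTR_ELEM()/DC_PERF_CNTR*() helpers and ties
 * together a display name, a CSR (where one exists), flags such as
 * CNTR_NORMAL/CNTR_SYNTH/CNTR_32BIT/CNTR_VL/CNTR_SDMA, and the access
 * routine used to read or clear the counter.
 */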
  3354. static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
  3355. [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
  3356. [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
  3357. CNTR_NORMAL),
  3358. [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
  3359. CNTR_NORMAL),
  3360. [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
  3361. RCV_TID_FLOW_GEN_MISMATCH_CNT,
  3362. CNTR_NORMAL),
  3363. [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
  3364. CNTR_NORMAL),
  3365. [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
  3366. RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
  3367. [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
  3368. CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
  3369. [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
  3370. CNTR_NORMAL),
  3371. [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
  3372. CNTR_NORMAL),
  3373. [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
  3374. CNTR_NORMAL),
  3375. [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
  3376. CNTR_NORMAL),
  3377. [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
  3378. CNTR_NORMAL),
  3379. [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
  3380. CNTR_NORMAL),
  3381. [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
  3382. CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
  3383. [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
  3384. CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
  3385. [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
  3386. CNTR_SYNTH),
  3387. [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
  3388. access_dc_rcv_err_cnt),
  3389. [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
  3390. CNTR_SYNTH),
  3391. [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
  3392. CNTR_SYNTH),
  3393. [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
  3394. CNTR_SYNTH),
  3395. [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
  3396. DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
  3397. [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
  3398. DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
  3399. CNTR_SYNTH),
  3400. [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
  3401. DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
  3402. [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
  3403. CNTR_SYNTH),
  3404. [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
  3405. CNTR_SYNTH),
  3406. [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
  3407. CNTR_SYNTH),
  3408. [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
  3409. CNTR_SYNTH),
  3410. [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
  3411. CNTR_SYNTH),
  3412. [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
  3413. CNTR_SYNTH),
  3414. [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
  3415. CNTR_SYNTH),
  3416. [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
  3417. CNTR_SYNTH | CNTR_VL),
  3418. [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
  3419. CNTR_SYNTH | CNTR_VL),
  3420. [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
  3421. [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
  3422. CNTR_SYNTH | CNTR_VL),
  3423. [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
  3424. [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
  3425. CNTR_SYNTH | CNTR_VL),
  3426. [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
  3427. CNTR_SYNTH),
  3428. [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
  3429. CNTR_SYNTH | CNTR_VL),
  3430. [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
  3431. CNTR_SYNTH),
  3432. [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
  3433. CNTR_SYNTH | CNTR_VL),
  3434. [C_DC_TOTAL_CRC] =
  3435. DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
  3436. CNTR_SYNTH),
  3437. [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
  3438. CNTR_SYNTH),
  3439. [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
  3440. CNTR_SYNTH),
  3441. [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
  3442. CNTR_SYNTH),
  3443. [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
  3444. CNTR_SYNTH),
  3445. [C_DC_CRC_MULT_LN] =
  3446. DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
  3447. CNTR_SYNTH),
  3448. [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
  3449. CNTR_SYNTH),
  3450. [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
  3451. CNTR_SYNTH),
  3452. [C_DC_SEQ_CRC_CNT] =
  3453. DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
  3454. CNTR_SYNTH),
  3455. [C_DC_ESC0_ONLY_CNT] =
  3456. DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
  3457. CNTR_SYNTH),
  3458. [C_DC_ESC0_PLUS1_CNT] =
  3459. DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
  3460. CNTR_SYNTH),
  3461. [C_DC_ESC0_PLUS2_CNT] =
  3462. DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
  3463. CNTR_SYNTH),
  3464. [C_DC_REINIT_FROM_PEER_CNT] =
  3465. DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
  3466. CNTR_SYNTH),
  3467. [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
  3468. CNTR_SYNTH),
  3469. [C_DC_MISC_FLG_CNT] =
  3470. DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
  3471. CNTR_SYNTH),
  3472. [C_DC_PRF_GOOD_LTP_CNT] =
  3473. DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
  3474. [C_DC_PRF_ACCEPTED_LTP_CNT] =
  3475. DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
  3476. CNTR_SYNTH),
  3477. [C_DC_PRF_RX_FLIT_CNT] =
  3478. DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
  3479. [C_DC_PRF_TX_FLIT_CNT] =
  3480. DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
  3481. [C_DC_PRF_CLK_CNTR] =
  3482. DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
  3483. [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
  3484. DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
  3485. [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
  3486. DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
  3487. CNTR_SYNTH),
  3488. [C_DC_PG_STS_TX_SBE_CNT] =
  3489. DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
  3490. [C_DC_PG_STS_TX_MBE_CNT] =
  3491. DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
  3492. CNTR_SYNTH),
  3493. [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
  3494. access_sw_cpu_intr),
  3495. [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
  3496. access_sw_cpu_rcv_limit),
  3497. [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
  3498. access_sw_vtx_wait),
  3499. [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
  3500. access_sw_pio_wait),
  3501. [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
  3502. access_sw_pio_drain),
  3503. [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
  3504. access_sw_kmem_wait),
  3505. [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
  3506. access_sw_send_schedule),
  3507. [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
  3508. SEND_DMA_DESC_FETCHED_CNT, 0,
  3509. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3510. dev_access_u32_csr),
  3511. [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
  3512. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3513. access_sde_int_cnt),
  3514. [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
  3515. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3516. access_sde_err_cnt),
  3517. [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
  3518. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3519. access_sde_idle_int_cnt),
  3520. [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
  3521. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3522. access_sde_progress_int_cnt),
  3523. /* MISC_ERR_STATUS */
  3524. [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
  3525. CNTR_NORMAL,
  3526. access_misc_pll_lock_fail_err_cnt),
  3527. [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
  3528. CNTR_NORMAL,
  3529. access_misc_mbist_fail_err_cnt),
  3530. [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
  3531. CNTR_NORMAL,
  3532. access_misc_invalid_eep_cmd_err_cnt),
  3533. [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
  3534. CNTR_NORMAL,
  3535. access_misc_efuse_done_parity_err_cnt),
  3536. [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
  3537. CNTR_NORMAL,
  3538. access_misc_efuse_write_err_cnt),
  3539. [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
  3540. 0, CNTR_NORMAL,
  3541. access_misc_efuse_read_bad_addr_err_cnt),
  3542. [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
  3543. CNTR_NORMAL,
  3544. access_misc_efuse_csr_parity_err_cnt),
  3545. [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
  3546. CNTR_NORMAL,
  3547. access_misc_fw_auth_failed_err_cnt),
  3548. [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
  3549. CNTR_NORMAL,
  3550. access_misc_key_mismatch_err_cnt),
  3551. [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
  3552. CNTR_NORMAL,
  3553. access_misc_sbus_write_failed_err_cnt),
  3554. [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
  3555. CNTR_NORMAL,
  3556. access_misc_csr_write_bad_addr_err_cnt),
  3557. [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
  3558. CNTR_NORMAL,
  3559. access_misc_csr_read_bad_addr_err_cnt),
  3560. [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
  3561. CNTR_NORMAL,
  3562. access_misc_csr_parity_err_cnt),
  3563. /* CceErrStatus */
  3564. [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
  3565. CNTR_NORMAL,
  3566. access_sw_cce_err_status_aggregated_cnt),
  3567. [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
  3568. CNTR_NORMAL,
  3569. access_cce_msix_csr_parity_err_cnt),
  3570. [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
  3571. CNTR_NORMAL,
  3572. access_cce_int_map_unc_err_cnt),
  3573. [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
  3574. CNTR_NORMAL,
  3575. access_cce_int_map_cor_err_cnt),
  3576. [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
  3577. CNTR_NORMAL,
  3578. access_cce_msix_table_unc_err_cnt),
  3579. [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
  3580. CNTR_NORMAL,
  3581. access_cce_msix_table_cor_err_cnt),
  3582. [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
  3583. 0, CNTR_NORMAL,
  3584. access_cce_rxdma_conv_fifo_parity_err_cnt),
  3585. [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
  3586. 0, CNTR_NORMAL,
  3587. access_cce_rcpl_async_fifo_parity_err_cnt),
  3588. [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
  3589. CNTR_NORMAL,
  3590. access_cce_seg_write_bad_addr_err_cnt),
  3591. [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
  3592. CNTR_NORMAL,
  3593. access_cce_seg_read_bad_addr_err_cnt),
  3594. [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
  3595. CNTR_NORMAL,
  3596. access_la_triggered_cnt),
  3597. [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
  3598. CNTR_NORMAL,
  3599. access_cce_trgt_cpl_timeout_err_cnt),
  3600. [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
  3601. CNTR_NORMAL,
  3602. access_pcic_receive_parity_err_cnt),
  3603. [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
  3604. CNTR_NORMAL,
  3605. access_pcic_transmit_back_parity_err_cnt),
  3606. [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
  3607. 0, CNTR_NORMAL,
  3608. access_pcic_transmit_front_parity_err_cnt),
  3609. [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
  3610. CNTR_NORMAL,
  3611. access_pcic_cpl_dat_q_unc_err_cnt),
  3612. [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
  3613. CNTR_NORMAL,
  3614. access_pcic_cpl_hd_q_unc_err_cnt),
  3615. [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
  3616. CNTR_NORMAL,
  3617. access_pcic_post_dat_q_unc_err_cnt),
  3618. [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
  3619. CNTR_NORMAL,
  3620. access_pcic_post_hd_q_unc_err_cnt),
  3621. [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
  3622. CNTR_NORMAL,
  3623. access_pcic_retry_sot_mem_unc_err_cnt),
  3624. [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
  3625. CNTR_NORMAL,
  3626. access_pcic_retry_mem_unc_err),
  3627. [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
  3628. CNTR_NORMAL,
  3629. access_pcic_n_post_dat_q_parity_err_cnt),
  3630. [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
  3631. CNTR_NORMAL,
  3632. access_pcic_n_post_h_q_parity_err_cnt),
  3633. [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
  3634. CNTR_NORMAL,
  3635. access_pcic_cpl_dat_q_cor_err_cnt),
  3636. [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
  3637. CNTR_NORMAL,
  3638. access_pcic_cpl_hd_q_cor_err_cnt),
  3639. [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
  3640. CNTR_NORMAL,
  3641. access_pcic_post_dat_q_cor_err_cnt),
  3642. [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
  3643. CNTR_NORMAL,
  3644. access_pcic_post_hd_q_cor_err_cnt),
  3645. [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
  3646. CNTR_NORMAL,
  3647. access_pcic_retry_sot_mem_cor_err_cnt),
  3648. [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
  3649. CNTR_NORMAL,
  3650. access_pcic_retry_mem_cor_err_cnt),
  3651. [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
  3652. "CceCli1AsyncFifoDbgParityError", 0, 0,
  3653. CNTR_NORMAL,
  3654. access_cce_cli1_async_fifo_dbg_parity_err_cnt),
  3655. [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
  3656. "CceCli1AsyncFifoRxdmaParityError", 0, 0,
  3657. CNTR_NORMAL,
  3658. access_cce_cli1_async_fifo_rxdma_parity_err_cnt
  3659. ),
  3660. [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
  3661. "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
  3662. CNTR_NORMAL,
  3663. access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
  3664. [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
  3665. "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
  3666. CNTR_NORMAL,
  3667. access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
  3668. [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
  3669. 0, CNTR_NORMAL,
  3670. access_cce_cli2_async_fifo_parity_err_cnt),
  3671. [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
  3672. CNTR_NORMAL,
  3673. access_cce_csr_cfg_bus_parity_err_cnt),
  3674. [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
  3675. 0, CNTR_NORMAL,
  3676. access_cce_cli0_async_fifo_parity_err_cnt),
  3677. [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
  3678. CNTR_NORMAL,
  3679. access_cce_rspd_data_parity_err_cnt),
  3680. [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
  3681. CNTR_NORMAL,
  3682. access_cce_trgt_access_err_cnt),
  3683. [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
  3684. 0, CNTR_NORMAL,
  3685. access_cce_trgt_async_fifo_parity_err_cnt),
  3686. [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
  3687. CNTR_NORMAL,
  3688. access_cce_csr_write_bad_addr_err_cnt),
  3689. [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
  3690. CNTR_NORMAL,
  3691. access_cce_csr_read_bad_addr_err_cnt),
  3692. [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
  3693. CNTR_NORMAL,
  3694. access_ccs_csr_parity_err_cnt),
  3695. /* RcvErrStatus */
  3696. [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
  3697. CNTR_NORMAL,
  3698. access_rx_csr_parity_err_cnt),
  3699. [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
  3700. CNTR_NORMAL,
  3701. access_rx_csr_write_bad_addr_err_cnt),
  3702. [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
  3703. CNTR_NORMAL,
  3704. access_rx_csr_read_bad_addr_err_cnt),
  3705. [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
  3706. CNTR_NORMAL,
  3707. access_rx_dma_csr_unc_err_cnt),
  3708. [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
  3709. CNTR_NORMAL,
  3710. access_rx_dma_dq_fsm_encoding_err_cnt),
  3711. [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
  3712. CNTR_NORMAL,
  3713. access_rx_dma_eq_fsm_encoding_err_cnt),
  3714. [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
  3715. CNTR_NORMAL,
  3716. access_rx_dma_csr_parity_err_cnt),
  3717. [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
  3718. CNTR_NORMAL,
  3719. access_rx_rbuf_data_cor_err_cnt),
  3720. [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
  3721. CNTR_NORMAL,
  3722. access_rx_rbuf_data_unc_err_cnt),
  3723. [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
  3724. CNTR_NORMAL,
  3725. access_rx_dma_data_fifo_rd_cor_err_cnt),
  3726. [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
  3727. CNTR_NORMAL,
  3728. access_rx_dma_data_fifo_rd_unc_err_cnt),
  3729. [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
  3730. CNTR_NORMAL,
  3731. access_rx_dma_hdr_fifo_rd_cor_err_cnt),
  3732. [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
  3733. CNTR_NORMAL,
  3734. access_rx_dma_hdr_fifo_rd_unc_err_cnt),
  3735. [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
  3736. CNTR_NORMAL,
  3737. access_rx_rbuf_desc_part2_cor_err_cnt),
  3738. [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
  3739. CNTR_NORMAL,
  3740. access_rx_rbuf_desc_part2_unc_err_cnt),
  3741. [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
  3742. CNTR_NORMAL,
  3743. access_rx_rbuf_desc_part1_cor_err_cnt),
  3744. [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
  3745. CNTR_NORMAL,
  3746. access_rx_rbuf_desc_part1_unc_err_cnt),
  3747. [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
  3748. CNTR_NORMAL,
  3749. access_rx_hq_intr_fsm_err_cnt),
  3750. [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
  3751. CNTR_NORMAL,
  3752. access_rx_hq_intr_csr_parity_err_cnt),
  3753. [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
  3754. CNTR_NORMAL,
  3755. access_rx_lookup_csr_parity_err_cnt),
  3756. [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
  3757. CNTR_NORMAL,
  3758. access_rx_lookup_rcv_array_cor_err_cnt),
  3759. [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
  3760. CNTR_NORMAL,
  3761. access_rx_lookup_rcv_array_unc_err_cnt),
  3762. [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
  3763. 0, CNTR_NORMAL,
  3764. access_rx_lookup_des_part2_parity_err_cnt),
  3765. [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
  3766. 0, CNTR_NORMAL,
  3767. access_rx_lookup_des_part1_unc_cor_err_cnt),
  3768. [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
  3769. CNTR_NORMAL,
  3770. access_rx_lookup_des_part1_unc_err_cnt),
  3771. [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
  3772. CNTR_NORMAL,
  3773. access_rx_rbuf_next_free_buf_cor_err_cnt),
  3774. [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
  3775. CNTR_NORMAL,
  3776. access_rx_rbuf_next_free_buf_unc_err_cnt),
  3777. [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
  3778. "RxRbufFlInitWrAddrParityErr", 0, 0,
  3779. CNTR_NORMAL,
  3780. access_rbuf_fl_init_wr_addr_parity_err_cnt),
  3781. [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
  3782. 0, CNTR_NORMAL,
  3783. access_rx_rbuf_fl_initdone_parity_err_cnt),
  3784. [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
  3785. 0, CNTR_NORMAL,
  3786. access_rx_rbuf_fl_write_addr_parity_err_cnt),
  3787. [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
  3788. CNTR_NORMAL,
  3789. access_rx_rbuf_fl_rd_addr_parity_err_cnt),
  3790. [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
  3791. CNTR_NORMAL,
  3792. access_rx_rbuf_empty_err_cnt),
  3793. [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
  3794. CNTR_NORMAL,
  3795. access_rx_rbuf_full_err_cnt),
  3796. [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
  3797. CNTR_NORMAL,
  3798. access_rbuf_bad_lookup_err_cnt),
  3799. [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
  3800. CNTR_NORMAL,
  3801. access_rbuf_ctx_id_parity_err_cnt),
  3802. [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
  3803. CNTR_NORMAL,
  3804. access_rbuf_csr_qeopdw_parity_err_cnt),
  3805. [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
  3806. "RxRbufCsrQNumOfPktParityErr", 0, 0,
  3807. CNTR_NORMAL,
  3808. access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
  3809. [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
  3810. "RxRbufCsrQTlPtrParityErr", 0, 0,
  3811. CNTR_NORMAL,
  3812. access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
  3813. [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
  3814. 0, CNTR_NORMAL,
  3815. access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
  3816. [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
  3817. 0, CNTR_NORMAL,
  3818. access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
  3819. [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
  3820. 0, 0, CNTR_NORMAL,
  3821. access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
  3822. [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
  3823. 0, CNTR_NORMAL,
  3824. access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
  3825. [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
  3826. "RxRbufCsrQHeadBufNumParityErr", 0, 0,
  3827. CNTR_NORMAL,
  3828. access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
  3829. [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
  3830. 0, CNTR_NORMAL,
  3831. access_rx_rbuf_block_list_read_cor_err_cnt),
  3832. [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
  3833. 0, CNTR_NORMAL,
  3834. access_rx_rbuf_block_list_read_unc_err_cnt),
  3835. [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
  3836. CNTR_NORMAL,
  3837. access_rx_rbuf_lookup_des_cor_err_cnt),
  3838. [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
  3839. CNTR_NORMAL,
  3840. access_rx_rbuf_lookup_des_unc_err_cnt),
  3841. [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
  3842. "RxRbufLookupDesRegUncCorErr", 0, 0,
  3843. CNTR_NORMAL,
  3844. access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
  3845. [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
  3846. CNTR_NORMAL,
  3847. access_rx_rbuf_lookup_des_reg_unc_err_cnt),
  3848. [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
  3849. CNTR_NORMAL,
  3850. access_rx_rbuf_free_list_cor_err_cnt),
  3851. [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
  3852. CNTR_NORMAL,
  3853. access_rx_rbuf_free_list_unc_err_cnt),
  3854. [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
  3855. CNTR_NORMAL,
  3856. access_rx_rcv_fsm_encoding_err_cnt),
  3857. [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
  3858. CNTR_NORMAL,
  3859. access_rx_dma_flag_cor_err_cnt),
  3860. [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
  3861. CNTR_NORMAL,
  3862. access_rx_dma_flag_unc_err_cnt),
  3863. [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
  3864. CNTR_NORMAL,
  3865. access_rx_dc_sop_eop_parity_err_cnt),
  3866. [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
  3867. CNTR_NORMAL,
  3868. access_rx_rcv_csr_parity_err_cnt),
  3869. [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
  3870. CNTR_NORMAL,
  3871. access_rx_rcv_qp_map_table_cor_err_cnt),
  3872. [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
  3873. CNTR_NORMAL,
  3874. access_rx_rcv_qp_map_table_unc_err_cnt),
  3875. [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
  3876. CNTR_NORMAL,
  3877. access_rx_rcv_data_cor_err_cnt),
  3878. [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
  3879. CNTR_NORMAL,
  3880. access_rx_rcv_data_unc_err_cnt),
  3881. [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
  3882. CNTR_NORMAL,
  3883. access_rx_rcv_hdr_cor_err_cnt),
  3884. [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
  3885. CNTR_NORMAL,
  3886. access_rx_rcv_hdr_unc_err_cnt),
  3887. [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
  3888. CNTR_NORMAL,
  3889. access_rx_dc_intf_parity_err_cnt),
  3890. [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
  3891. CNTR_NORMAL,
  3892. access_rx_dma_csr_cor_err_cnt),
  3893. /* SendPioErrStatus */
  3894. [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
  3895. CNTR_NORMAL,
  3896. access_pio_pec_sop_head_parity_err_cnt),
  3897. [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
  3898. CNTR_NORMAL,
  3899. access_pio_pcc_sop_head_parity_err_cnt),
  3900. [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
  3901. 0, 0, CNTR_NORMAL,
  3902. access_pio_last_returned_cnt_parity_err_cnt),
  3903. [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
  3904. 0, CNTR_NORMAL,
  3905. access_pio_current_free_cnt_parity_err_cnt),
  3906. [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
  3907. CNTR_NORMAL,
  3908. access_pio_reserved_31_err_cnt),
  3909. [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
  3910. CNTR_NORMAL,
  3911. access_pio_reserved_30_err_cnt),
  3912. [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
  3913. CNTR_NORMAL,
  3914. access_pio_ppmc_sop_len_err_cnt),
  3915. [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
  3916. CNTR_NORMAL,
  3917. access_pio_ppmc_bqc_mem_parity_err_cnt),
  3918. [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
  3919. CNTR_NORMAL,
  3920. access_pio_vl_fifo_parity_err_cnt),
  3921. [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
  3922. CNTR_NORMAL,
  3923. access_pio_vlf_sop_parity_err_cnt),
  3924. [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
  3925. CNTR_NORMAL,
  3926. access_pio_vlf_v1_len_parity_err_cnt),
  3927. [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
  3928. CNTR_NORMAL,
  3929. access_pio_block_qw_count_parity_err_cnt),
  3930. [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
  3931. CNTR_NORMAL,
  3932. access_pio_write_qw_valid_parity_err_cnt),
  3933. [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
  3934. CNTR_NORMAL,
  3935. access_pio_state_machine_err_cnt),
  3936. [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
  3937. CNTR_NORMAL,
  3938. access_pio_write_data_parity_err_cnt),
  3939. [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
  3940. CNTR_NORMAL,
  3941. access_pio_host_addr_mem_cor_err_cnt),
  3942. [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
  3943. CNTR_NORMAL,
  3944. access_pio_host_addr_mem_unc_err_cnt),
  3945. [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
  3946. CNTR_NORMAL,
  3947. access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
  3948. [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
  3949. CNTR_NORMAL,
  3950. access_pio_init_sm_in_err_cnt),
  3951. [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
  3952. CNTR_NORMAL,
  3953. access_pio_ppmc_pbl_fifo_err_cnt),
  3954. [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
  3955. 0, CNTR_NORMAL,
  3956. access_pio_credit_ret_fifo_parity_err_cnt),
  3957. [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
  3958. CNTR_NORMAL,
  3959. access_pio_v1_len_mem_bank1_cor_err_cnt),
  3960. [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
  3961. CNTR_NORMAL,
  3962. access_pio_v1_len_mem_bank0_cor_err_cnt),
  3963. [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
  3964. CNTR_NORMAL,
  3965. access_pio_v1_len_mem_bank1_unc_err_cnt),
  3966. [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
  3967. CNTR_NORMAL,
  3968. access_pio_v1_len_mem_bank0_unc_err_cnt),
  3969. [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
  3970. CNTR_NORMAL,
  3971. access_pio_sm_pkt_reset_parity_err_cnt),
  3972. [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
  3973. CNTR_NORMAL,
  3974. access_pio_pkt_evict_fifo_parity_err_cnt),
  3975. [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
  3976. "PioSbrdctrlCrrelFifoParityErr", 0, 0,
  3977. CNTR_NORMAL,
  3978. access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
  3979. [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
  3980. CNTR_NORMAL,
  3981. access_pio_sbrdctl_crrel_parity_err_cnt),
  3982. [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
  3983. CNTR_NORMAL,
  3984. access_pio_pec_fifo_parity_err_cnt),
  3985. [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
  3986. CNTR_NORMAL,
  3987. access_pio_pcc_fifo_parity_err_cnt),
  3988. [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
  3989. CNTR_NORMAL,
  3990. access_pio_sb_mem_fifo1_err_cnt),
  3991. [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
  3992. CNTR_NORMAL,
  3993. access_pio_sb_mem_fifo0_err_cnt),
  3994. [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
  3995. CNTR_NORMAL,
  3996. access_pio_csr_parity_err_cnt),
  3997. [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
  3998. CNTR_NORMAL,
  3999. access_pio_write_addr_parity_err_cnt),
  4000. [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
  4001. CNTR_NORMAL,
  4002. access_pio_write_bad_ctxt_err_cnt),
  4003. /* SendDmaErrStatus */
  4004. [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
  4005. 0, CNTR_NORMAL,
  4006. access_sdma_pcie_req_tracking_cor_err_cnt),
  4007. [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
  4008. 0, CNTR_NORMAL,
  4009. access_sdma_pcie_req_tracking_unc_err_cnt),
  4010. [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
  4011. CNTR_NORMAL,
  4012. access_sdma_csr_parity_err_cnt),
  4013. [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
  4014. CNTR_NORMAL,
  4015. access_sdma_rpy_tag_err_cnt),
  4016. /* SendEgressErrStatus */
  4017. [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
  4018. CNTR_NORMAL,
  4019. access_tx_read_pio_memory_csr_unc_err_cnt),
  4020. [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
  4021. 0, CNTR_NORMAL,
  4022. access_tx_read_sdma_memory_csr_err_cnt),
  4023. [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
  4024. CNTR_NORMAL,
  4025. access_tx_egress_fifo_cor_err_cnt),
  4026. [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
  4027. CNTR_NORMAL,
  4028. access_tx_read_pio_memory_cor_err_cnt),
  4029. [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
  4030. CNTR_NORMAL,
  4031. access_tx_read_sdma_memory_cor_err_cnt),
  4032. [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
  4033. CNTR_NORMAL,
  4034. access_tx_sb_hdr_cor_err_cnt),
  4035. [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
  4036. CNTR_NORMAL,
  4037. access_tx_credit_overrun_err_cnt),
  4038. [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
  4039. CNTR_NORMAL,
  4040. access_tx_launch_fifo8_cor_err_cnt),
  4041. [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
  4042. CNTR_NORMAL,
  4043. access_tx_launch_fifo7_cor_err_cnt),
  4044. [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
  4045. CNTR_NORMAL,
  4046. access_tx_launch_fifo6_cor_err_cnt),
  4047. [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
  4048. CNTR_NORMAL,
  4049. access_tx_launch_fifo5_cor_err_cnt),
  4050. [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
  4051. CNTR_NORMAL,
  4052. access_tx_launch_fifo4_cor_err_cnt),
  4053. [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
  4054. CNTR_NORMAL,
  4055. access_tx_launch_fifo3_cor_err_cnt),
  4056. [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
  4057. CNTR_NORMAL,
  4058. access_tx_launch_fifo2_cor_err_cnt),
  4059. [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
  4060. CNTR_NORMAL,
  4061. access_tx_launch_fifo1_cor_err_cnt),
  4062. [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
  4063. CNTR_NORMAL,
  4064. access_tx_launch_fifo0_cor_err_cnt),
  4065. [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
  4066. CNTR_NORMAL,
  4067. access_tx_credit_return_vl_err_cnt),
  4068. [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
  4069. CNTR_NORMAL,
  4070. access_tx_hcrc_insertion_err_cnt),
  4071. [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
  4072. CNTR_NORMAL,
  4073. access_tx_egress_fifo_unc_err_cnt),
  4074. [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
  4075. CNTR_NORMAL,
  4076. access_tx_read_pio_memory_unc_err_cnt),
  4077. [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
  4078. CNTR_NORMAL,
  4079. access_tx_read_sdma_memory_unc_err_cnt),
  4080. [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
  4081. CNTR_NORMAL,
  4082. access_tx_sb_hdr_unc_err_cnt),
  4083. [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
  4084. CNTR_NORMAL,
  4085. access_tx_credit_return_partiy_err_cnt),
  4086. [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
  4087. 0, 0, CNTR_NORMAL,
  4088. access_tx_launch_fifo8_unc_or_parity_err_cnt),
  4089. [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
  4090. 0, 0, CNTR_NORMAL,
  4091. access_tx_launch_fifo7_unc_or_parity_err_cnt),
  4092. [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
  4093. 0, 0, CNTR_NORMAL,
  4094. access_tx_launch_fifo6_unc_or_parity_err_cnt),
  4095. [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
  4096. 0, 0, CNTR_NORMAL,
  4097. access_tx_launch_fifo5_unc_or_parity_err_cnt),
  4098. [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
  4099. 0, 0, CNTR_NORMAL,
  4100. access_tx_launch_fifo4_unc_or_parity_err_cnt),
  4101. [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
  4102. 0, 0, CNTR_NORMAL,
  4103. access_tx_launch_fifo3_unc_or_parity_err_cnt),
  4104. [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
  4105. 0, 0, CNTR_NORMAL,
  4106. access_tx_launch_fifo2_unc_or_parity_err_cnt),
  4107. [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
  4108. 0, 0, CNTR_NORMAL,
  4109. access_tx_launch_fifo1_unc_or_parity_err_cnt),
  4110. [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
  4111. 0, 0, CNTR_NORMAL,
  4112. access_tx_launch_fifo0_unc_or_parity_err_cnt),
  4113. [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
  4114. 0, 0, CNTR_NORMAL,
  4115. access_tx_sdma15_disallowed_packet_err_cnt),
  4116. [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
  4117. 0, 0, CNTR_NORMAL,
  4118. access_tx_sdma14_disallowed_packet_err_cnt),
  4119. [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
  4120. 0, 0, CNTR_NORMAL,
  4121. access_tx_sdma13_disallowed_packet_err_cnt),
  4122. [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
  4123. 0, 0, CNTR_NORMAL,
  4124. access_tx_sdma12_disallowed_packet_err_cnt),
  4125. [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
  4126. 0, 0, CNTR_NORMAL,
  4127. access_tx_sdma11_disallowed_packet_err_cnt),
  4128. [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
  4129. 0, 0, CNTR_NORMAL,
  4130. access_tx_sdma10_disallowed_packet_err_cnt),
  4131. [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
  4132. 0, 0, CNTR_NORMAL,
  4133. access_tx_sdma9_disallowed_packet_err_cnt),
  4134. [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
  4135. 0, 0, CNTR_NORMAL,
  4136. access_tx_sdma8_disallowed_packet_err_cnt),
  4137. [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
  4138. 0, 0, CNTR_NORMAL,
  4139. access_tx_sdma7_disallowed_packet_err_cnt),
  4140. [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
  4141. 0, 0, CNTR_NORMAL,
  4142. access_tx_sdma6_disallowed_packet_err_cnt),
  4143. [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
  4144. 0, 0, CNTR_NORMAL,
  4145. access_tx_sdma5_disallowed_packet_err_cnt),
  4146. [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
  4147. 0, 0, CNTR_NORMAL,
  4148. access_tx_sdma4_disallowed_packet_err_cnt),
  4149. [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
  4150. 0, 0, CNTR_NORMAL,
  4151. access_tx_sdma3_disallowed_packet_err_cnt),
  4152. [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
  4153. 0, 0, CNTR_NORMAL,
  4154. access_tx_sdma2_disallowed_packet_err_cnt),
  4155. [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
  4156. 0, 0, CNTR_NORMAL,
  4157. access_tx_sdma1_disallowed_packet_err_cnt),
  4158. [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
  4159. 0, 0, CNTR_NORMAL,
  4160. access_tx_sdma0_disallowed_packet_err_cnt),
  4161. [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
  4162. CNTR_NORMAL,
  4163. access_tx_config_parity_err_cnt),
  4164. [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
  4165. CNTR_NORMAL,
  4166. access_tx_sbrd_ctl_csr_parity_err_cnt),
  4167. [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
  4168. CNTR_NORMAL,
  4169. access_tx_launch_csr_parity_err_cnt),
  4170. [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
  4171. CNTR_NORMAL,
  4172. access_tx_illegal_vl_err_cnt),
  4173. [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
  4174. "TxSbrdCtlStateMachineParityErr", 0, 0,
  4175. CNTR_NORMAL,
  4176. access_tx_sbrd_ctl_state_machine_parity_err_cnt),
  4177. [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
  4178. CNTR_NORMAL,
  4179. access_egress_reserved_10_err_cnt),
  4180. [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
  4181. CNTR_NORMAL,
  4182. access_egress_reserved_9_err_cnt),
  4183. [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
  4184. 0, 0, CNTR_NORMAL,
  4185. access_tx_sdma_launch_intf_parity_err_cnt),
  4186. [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
  4187. CNTR_NORMAL,
  4188. access_tx_pio_launch_intf_parity_err_cnt),
  4189. [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
  4190. CNTR_NORMAL,
  4191. access_egress_reserved_6_err_cnt),
  4192. [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
  4193. CNTR_NORMAL,
  4194. access_tx_incorrect_link_state_err_cnt),
  4195. [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
  4196. CNTR_NORMAL,
  4197. access_tx_linkdown_err_cnt),
  4198. [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
  4199. "EgressFifoUnderrunOrParityErr", 0, 0,
  4200. CNTR_NORMAL,
  4201. access_tx_egress_fifi_underrun_or_parity_err_cnt),
  4202. [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
  4203. CNTR_NORMAL,
  4204. access_egress_reserved_2_err_cnt),
  4205. [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
  4206. CNTR_NORMAL,
  4207. access_tx_pkt_integrity_mem_unc_err_cnt),
  4208. [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
  4209. CNTR_NORMAL,
  4210. access_tx_pkt_integrity_mem_cor_err_cnt),
  4211. /* SendErrStatus */
  4212. [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
  4213. CNTR_NORMAL,
  4214. access_send_csr_write_bad_addr_err_cnt),
  4215. [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
  4216. CNTR_NORMAL,
  4217. access_send_csr_read_bad_addr_err_cnt),
  4218. [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
  4219. CNTR_NORMAL,
  4220. access_send_csr_parity_cnt),
  4221. /* SendCtxtErrStatus */
  4222. [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
  4223. CNTR_NORMAL,
  4224. access_pio_write_out_of_bounds_err_cnt),
  4225. [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
  4226. CNTR_NORMAL,
  4227. access_pio_write_overflow_err_cnt),
  4228. [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
  4229. 0, 0, CNTR_NORMAL,
  4230. access_pio_write_crosses_boundary_err_cnt),
  4231. [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
  4232. CNTR_NORMAL,
  4233. access_pio_disallowed_packet_err_cnt),
  4234. [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
  4235. CNTR_NORMAL,
  4236. access_pio_inconsistent_sop_err_cnt),
  4237. /* SendDmaEngErrStatus */
  4238. [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
  4239. 0, 0, CNTR_NORMAL,
  4240. access_sdma_header_request_fifo_cor_err_cnt),
  4241. [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
  4242. CNTR_NORMAL,
  4243. access_sdma_header_storage_cor_err_cnt),
  4244. [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
  4245. CNTR_NORMAL,
  4246. access_sdma_packet_tracking_cor_err_cnt),
  4247. [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
  4248. CNTR_NORMAL,
  4249. access_sdma_assembly_cor_err_cnt),
  4250. [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
  4251. CNTR_NORMAL,
  4252. access_sdma_desc_table_cor_err_cnt),
  4253. [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
  4254. 0, 0, CNTR_NORMAL,
  4255. access_sdma_header_request_fifo_unc_err_cnt),
  4256. [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
  4257. CNTR_NORMAL,
  4258. access_sdma_header_storage_unc_err_cnt),
  4259. [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
  4260. CNTR_NORMAL,
  4261. access_sdma_packet_tracking_unc_err_cnt),
  4262. [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
  4263. CNTR_NORMAL,
  4264. access_sdma_assembly_unc_err_cnt),
  4265. [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
  4266. CNTR_NORMAL,
  4267. access_sdma_desc_table_unc_err_cnt),
  4268. [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
  4269. CNTR_NORMAL,
  4270. access_sdma_timeout_err_cnt),
  4271. [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
  4272. CNTR_NORMAL,
  4273. access_sdma_header_length_err_cnt),
  4274. [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
  4275. CNTR_NORMAL,
  4276. access_sdma_header_address_err_cnt),
  4277. [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
  4278. CNTR_NORMAL,
  4279. access_sdma_header_select_err_cnt),
  4280. [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
  4281. CNTR_NORMAL,
  4282. access_sdma_reserved_9_err_cnt),
  4283. [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
  4284. CNTR_NORMAL,
  4285. access_sdma_packet_desc_overflow_err_cnt),
  4286. [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
  4287. CNTR_NORMAL,
  4288. access_sdma_length_mismatch_err_cnt),
  4289. [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
  4290. CNTR_NORMAL,
  4291. access_sdma_halt_err_cnt),
  4292. [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
  4293. CNTR_NORMAL,
  4294. access_sdma_mem_read_err_cnt),
  4295. [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
  4296. CNTR_NORMAL,
  4297. access_sdma_first_desc_err_cnt),
  4298. [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
  4299. CNTR_NORMAL,
  4300. access_sdma_tail_out_of_bounds_err_cnt),
  4301. [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
  4302. CNTR_NORMAL,
  4303. access_sdma_too_long_err_cnt),
  4304. [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
  4305. CNTR_NORMAL,
  4306. access_sdma_gen_mismatch_err_cnt),
  4307. [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
  4308. CNTR_NORMAL,
  4309. access_sdma_wrong_dw_err_cnt),
  4310. };
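/*
 * A note on the table above (illustrative sketch, not additional driver
 * logic): each CNTR_ELEM() initializer packs the counter name exported to
 * user space, a CSR/offset pair (0, 0 here because these error counters
 * are maintained in software rather than read from a hardware register),
 * the CNTR_* flags, and the access routine used whenever the counter is
 * read.  For example,
 *
 *	[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
 *					   CNTR_NORMAL,
 *					   access_rx_dma_csr_cor_err_cnt),
 *
 * means a read of "RxDmaCsrCorErr" simply dispatches through
 * access_rx_dma_csr_cor_err_cnt().
 */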
  4311. static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
  4312. [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
  4313. CNTR_NORMAL),
  4314. [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
  4315. CNTR_NORMAL),
  4316. [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
  4317. CNTR_NORMAL),
  4318. [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
  4319. CNTR_NORMAL),
  4320. [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
  4321. CNTR_NORMAL),
  4322. [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
  4323. CNTR_NORMAL),
  4324. [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
  4325. CNTR_NORMAL),
  4326. [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
  4327. [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
  4328. [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
  4329. [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
  4330. CNTR_SYNTH | CNTR_VL),
  4331. [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
  4332. CNTR_SYNTH | CNTR_VL),
  4333. [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
  4334. CNTR_SYNTH | CNTR_VL),
  4335. [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
  4336. [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
  4337. [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4338. access_sw_link_dn_cnt),
  4339. [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4340. access_sw_link_up_cnt),
  4341. [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
  4342. access_sw_unknown_frame_cnt),
  4343. [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4344. access_sw_xmit_discards),
  4345. [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
  4346. CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
  4347. access_sw_xmit_discards),
  4348. [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
  4349. access_xmit_constraint_errs),
  4350. [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
  4351. access_rcv_constraint_errs),
  4352. [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
  4353. [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
  4354. [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
  4355. [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
  4356. [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
  4357. [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
  4358. [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
  4359. [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
  4360. [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
  4361. [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
  4362. [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
  4363. [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
  4364. [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
  4365. access_sw_cpu_rc_acks),
  4366. [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
  4367. access_sw_cpu_rc_qacks),
  4368. [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
  4369. access_sw_cpu_rc_delayed_comp),
  4370. [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
  4371. [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
  4372. [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
  4373. [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
  4374. [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
  4375. [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
  4376. [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
  4377. [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
  4378. [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
  4379. [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
  4380. [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
  4381. [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
  4382. [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
  4383. [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
  4384. [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
  4385. [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
  4386. [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
  4387. [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
  4388. [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
  4389. [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
  4390. [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
  4391. [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
  4392. [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
  4393. [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
  4394. [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
  4395. [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
  4396. [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
  4397. [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
  4398. [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
  4399. [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
  4400. [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
  4401. [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
  4402. [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
  4403. [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
  4404. [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
  4405. [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
  4406. [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
  4407. [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
  4408. [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
  4409. [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
  4410. [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
  4411. [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
  4412. [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
  4413. [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
  4414. [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
  4415. [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
  4416. [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
  4417. [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
  4418. [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
  4419. [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
  4420. [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
  4421. [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
  4422. [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
  4423. [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
  4424. [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
  4425. [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
  4426. [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
  4427. [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
  4428. [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
  4429. [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
  4430. [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
  4431. [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
  4432. [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
  4433. [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
  4434. [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
  4435. [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
  4436. [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
  4437. [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
  4438. [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
  4439. [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
  4440. [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
  4441. [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
  4442. [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
  4443. [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
  4444. [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
  4445. [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
  4446. [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
  4447. [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
  4448. [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
  4449. [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
  4450. };
  4451. /* ======================================================================== */
4452. /* return true if this is chip revision a */
  4453. int is_ax(struct hfi1_devdata *dd)
  4454. {
  4455. u8 chip_rev_minor =
  4456. dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
  4457. & CCE_REVISION_CHIP_REV_MINOR_MASK;
  4458. return (chip_rev_minor & 0xf0) == 0;
  4459. }
4460. /* return true if this is chip revision b */
  4461. int is_bx(struct hfi1_devdata *dd)
  4462. {
  4463. u8 chip_rev_minor =
  4464. dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
  4465. & CCE_REVISION_CHIP_REV_MINOR_MASK;
  4466. return (chip_rev_minor & 0xF0) == 0x10;
  4467. }
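/*
 * In both helpers above, the upper nibble of the CCE_REVISION minor field
 * selects the silicon step: 0x0x is an A-step part, 0x1x is a B-step part.
 * For example, a minor revision of 0x11 makes is_bx() return true and
 * is_ax() return false.
 */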
  4468. /*
4469. * Append string s to buffer buf. Arguments curp and lenp are the current
  4470. * position and remaining length, respectively.
  4471. *
  4472. * return 0 on success, 1 on out of room
  4473. */
  4474. static int append_str(char *buf, char **curp, int *lenp, const char *s)
  4475. {
  4476. char *p = *curp;
  4477. int len = *lenp;
  4478. int result = 0; /* success */
  4479. char c;
4480. /* add a comma, if not first in the buffer */
  4481. if (p != buf) {
  4482. if (len == 0) {
  4483. result = 1; /* out of room */
  4484. goto done;
  4485. }
  4486. *p++ = ',';
  4487. len--;
  4488. }
  4489. /* copy the string */
  4490. while ((c = *s++) != 0) {
  4491. if (len == 0) {
  4492. result = 1; /* out of room */
  4493. goto done;
  4494. }
  4495. *p++ = c;
  4496. len--;
  4497. }
  4498. done:
  4499. /* write return values */
  4500. *curp = p;
  4501. *lenp = len;
  4502. return result;
  4503. }
  4504. /*
  4505. * Using the given flag table, print a comma separated string into
  4506. * the buffer. End in '*' if the buffer is too short.
  4507. */
  4508. static char *flag_string(char *buf, int buf_len, u64 flags,
  4509. struct flag_table *table, int table_size)
  4510. {
  4511. char extra[32];
  4512. char *p = buf;
  4513. int len = buf_len;
  4514. int no_room = 0;
  4515. int i;
4516. /* make sure there are at least 2 bytes so we can form "*" plus the nul */
  4517. if (len < 2)
  4518. return "";
  4519. len--; /* leave room for a nul */
  4520. for (i = 0; i < table_size; i++) {
  4521. if (flags & table[i].flag) {
  4522. no_room = append_str(buf, &p, &len, table[i].str);
  4523. if (no_room)
  4524. break;
  4525. flags &= ~table[i].flag;
  4526. }
  4527. }
  4528. /* any undocumented bits left? */
  4529. if (!no_room && flags) {
  4530. snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
  4531. no_room = append_str(buf, &p, &len, extra);
  4532. }
4533. /* add '*' if we ran out of room */
  4534. if (no_room) {
  4535. /* may need to back up to add space for a '*' */
  4536. if (len == 0)
  4537. --p;
  4538. *p++ = '*';
  4539. }
  4540. /* add final nul - space already allocated above */
  4541. *p = 0;
  4542. return buf;
  4543. }
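/*
 * Example of flag_string() output (illustrative flags and table): with
 * flags 0x5 and a table naming only bit 0 as "ErrA", the result is
 *
 *	"ErrA,bits 0x4"
 *
 * since unnamed bits are reported collectively; if the buffer runs out,
 * the string is truncated and ends in '*'.
 */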
  4544. /* first 8 CCE error interrupt source names */
  4545. static const char * const cce_misc_names[] = {
  4546. "CceErrInt", /* 0 */
  4547. "RxeErrInt", /* 1 */
  4548. "MiscErrInt", /* 2 */
  4549. "Reserved3", /* 3 */
  4550. "PioErrInt", /* 4 */
  4551. "SDmaErrInt", /* 5 */
  4552. "EgressErrInt", /* 6 */
  4553. "TxeErrInt" /* 7 */
  4554. };
  4555. /*
  4556. * Return the miscellaneous error interrupt name.
  4557. */
  4558. static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
  4559. {
  4560. if (source < ARRAY_SIZE(cce_misc_names))
  4561. strncpy(buf, cce_misc_names[source], bsize);
  4562. else
  4563. snprintf(buf, bsize, "Reserved%u",
  4564. source + IS_GENERAL_ERR_START);
  4565. return buf;
  4566. }
  4567. /*
  4568. * Return the SDMA engine error interrupt name.
  4569. */
  4570. static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
  4571. {
  4572. snprintf(buf, bsize, "SDmaEngErrInt%u", source);
  4573. return buf;
  4574. }
  4575. /*
  4576. * Return the send context error interrupt name.
  4577. */
  4578. static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
  4579. {
  4580. snprintf(buf, bsize, "SendCtxtErrInt%u", source);
  4581. return buf;
  4582. }
  4583. static const char * const various_names[] = {
  4584. "PbcInt",
  4585. "GpioAssertInt",
  4586. "Qsfp1Int",
  4587. "Qsfp2Int",
  4588. "TCritInt"
  4589. };
  4590. /*
  4591. * Return the various interrupt name.
  4592. */
  4593. static char *is_various_name(char *buf, size_t bsize, unsigned int source)
  4594. {
  4595. if (source < ARRAY_SIZE(various_names))
  4596. strncpy(buf, various_names[source], bsize);
  4597. else
  4598. snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
  4599. return buf;
  4600. }
  4601. /*
  4602. * Return the DC interrupt name.
  4603. */
  4604. static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
  4605. {
  4606. static const char * const dc_int_names[] = {
  4607. "common",
  4608. "lcb",
  4609. "8051",
  4610. "lbm" /* local block merge */
  4611. };
  4612. if (source < ARRAY_SIZE(dc_int_names))
  4613. snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
  4614. else
  4615. snprintf(buf, bsize, "DCInt%u", source);
  4616. return buf;
  4617. }
  4618. static const char * const sdma_int_names[] = {
  4619. "SDmaInt",
  4620. "SdmaIdleInt",
  4621. "SdmaProgressInt",
  4622. };
  4623. /*
  4624. * Return the SDMA engine interrupt name.
  4625. */
  4626. static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
  4627. {
  4628. /* what interrupt */
  4629. unsigned int what = source / TXE_NUM_SDMA_ENGINES;
  4630. /* which engine */
  4631. unsigned int which = source % TXE_NUM_SDMA_ENGINES;
  4632. if (likely(what < 3))
  4633. snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
  4634. else
  4635. snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
  4636. return buf;
  4637. }
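/*
 * The SDMA interrupt sources are laid out as three consecutive groups of
 * TXE_NUM_SDMA_ENGINES sources (SDmaInt, SdmaIdleInt, SdmaProgressInt),
 * hence the divide/modulo above.  For example, assuming 16 engines,
 * source 17 decomposes to what = 1, which = 1, i.e. "SdmaIdleInt1".
 */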
  4638. /*
  4639. * Return the receive available interrupt name.
  4640. */
  4641. static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
  4642. {
  4643. snprintf(buf, bsize, "RcvAvailInt%u", source);
  4644. return buf;
  4645. }
  4646. /*
  4647. * Return the receive urgent interrupt name.
  4648. */
  4649. static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
  4650. {
  4651. snprintf(buf, bsize, "RcvUrgentInt%u", source);
  4652. return buf;
  4653. }
  4654. /*
  4655. * Return the send credit interrupt name.
  4656. */
  4657. static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
  4658. {
  4659. snprintf(buf, bsize, "SendCreditInt%u", source);
  4660. return buf;
  4661. }
  4662. /*
  4663. * Return the reserved interrupt name.
  4664. */
  4665. static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
  4666. {
  4667. snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
  4668. return buf;
  4669. }
  4670. static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
  4671. {
  4672. return flag_string(buf, buf_len, flags,
  4673. cce_err_status_flags,
  4674. ARRAY_SIZE(cce_err_status_flags));
  4675. }
  4676. static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
  4677. {
  4678. return flag_string(buf, buf_len, flags,
  4679. rxe_err_status_flags,
  4680. ARRAY_SIZE(rxe_err_status_flags));
  4681. }
  4682. static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
  4683. {
  4684. return flag_string(buf, buf_len, flags, misc_err_status_flags,
  4685. ARRAY_SIZE(misc_err_status_flags));
  4686. }
  4687. static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
  4688. {
  4689. return flag_string(buf, buf_len, flags,
  4690. pio_err_status_flags,
  4691. ARRAY_SIZE(pio_err_status_flags));
  4692. }
  4693. static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
  4694. {
  4695. return flag_string(buf, buf_len, flags,
  4696. sdma_err_status_flags,
  4697. ARRAY_SIZE(sdma_err_status_flags));
  4698. }
  4699. static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
  4700. {
  4701. return flag_string(buf, buf_len, flags,
  4702. egress_err_status_flags,
  4703. ARRAY_SIZE(egress_err_status_flags));
  4704. }
  4705. static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
  4706. {
  4707. return flag_string(buf, buf_len, flags,
  4708. egress_err_info_flags,
  4709. ARRAY_SIZE(egress_err_info_flags));
  4710. }
  4711. static char *send_err_status_string(char *buf, int buf_len, u64 flags)
  4712. {
  4713. return flag_string(buf, buf_len, flags,
  4714. send_err_status_flags,
  4715. ARRAY_SIZE(send_err_status_flags));
  4716. }
  4717. static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4718. {
  4719. char buf[96];
  4720. int i = 0;
  4721. /*
4722. * For most of these errors, there is nothing that can be done except
  4723. * report or record it.
  4724. */
  4725. dd_dev_info(dd, "CCE Error: %s\n",
  4726. cce_err_status_string(buf, sizeof(buf), reg));
  4727. if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
  4728. is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
  4729. /* this error requires a manual drop into SPC freeze mode */
  4730. /* then a fix up */
  4731. start_freeze_handling(dd->pport, FREEZE_SELF);
  4732. }
  4733. for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
  4734. if (reg & (1ull << i)) {
  4735. incr_cntr64(&dd->cce_err_status_cnt[i]);
  4736. /* maintain a counter over all cce_err_status errors */
  4737. incr_cntr64(&dd->sw_cce_err_status_aggregate);
  4738. }
  4739. }
  4740. }
  4741. /*
  4742. * Check counters for receive errors that do not have an interrupt
  4743. * associated with them.
  4744. */
  4745. #define RCVERR_CHECK_TIME 10
  4746. static void update_rcverr_timer(unsigned long opaque)
  4747. {
  4748. struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
  4749. struct hfi1_pportdata *ppd = dd->pport;
  4750. u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
  4751. if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
  4752. ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
  4753. dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
  4754. set_link_down_reason(
  4755. ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
  4756. OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
  4757. queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
  4758. }
  4759. dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
  4760. mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
  4761. }
  4762. static int init_rcverr(struct hfi1_devdata *dd)
  4763. {
  4764. setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
  4765. /* Assume the hardware counter has been reset */
  4766. dd->rcv_ovfl_cnt = 0;
  4767. return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
  4768. }
  4769. static void free_rcverr(struct hfi1_devdata *dd)
  4770. {
  4771. if (dd->rcverr_timer.data)
  4772. del_timer_sync(&dd->rcverr_timer);
  4773. dd->rcverr_timer.data = 0;
  4774. }
  4775. static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4776. {
  4777. char buf[96];
  4778. int i = 0;
  4779. dd_dev_info(dd, "Receive Error: %s\n",
  4780. rxe_err_status_string(buf, sizeof(buf), reg));
  4781. if (reg & ALL_RXE_FREEZE_ERR) {
  4782. int flags = 0;
  4783. /*
  4784. * Freeze mode recovery is disabled for the errors
  4785. * in RXE_FREEZE_ABORT_MASK
  4786. */
  4787. if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
  4788. flags = FREEZE_ABORT;
  4789. start_freeze_handling(dd->pport, flags);
  4790. }
  4791. for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
  4792. if (reg & (1ull << i))
  4793. incr_cntr64(&dd->rcv_err_status_cnt[i]);
  4794. }
  4795. }
  4796. static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4797. {
  4798. char buf[96];
  4799. int i = 0;
  4800. dd_dev_info(dd, "Misc Error: %s",
  4801. misc_err_status_string(buf, sizeof(buf), reg));
  4802. for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
  4803. if (reg & (1ull << i))
  4804. incr_cntr64(&dd->misc_err_status_cnt[i]);
  4805. }
  4806. }
  4807. static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4808. {
  4809. char buf[96];
  4810. int i = 0;
  4811. dd_dev_info(dd, "PIO Error: %s\n",
  4812. pio_err_status_string(buf, sizeof(buf), reg));
  4813. if (reg & ALL_PIO_FREEZE_ERR)
  4814. start_freeze_handling(dd->pport, 0);
  4815. for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
  4816. if (reg & (1ull << i))
  4817. incr_cntr64(&dd->send_pio_err_status_cnt[i]);
  4818. }
  4819. }
  4820. static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4821. {
  4822. char buf[96];
  4823. int i = 0;
  4824. dd_dev_info(dd, "SDMA Error: %s\n",
  4825. sdma_err_status_string(buf, sizeof(buf), reg));
  4826. if (reg & ALL_SDMA_FREEZE_ERR)
  4827. start_freeze_handling(dd->pport, 0);
  4828. for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
  4829. if (reg & (1ull << i))
  4830. incr_cntr64(&dd->send_dma_err_status_cnt[i]);
  4831. }
  4832. }
  4833. static inline void __count_port_discards(struct hfi1_pportdata *ppd)
  4834. {
  4835. incr_cntr64(&ppd->port_xmit_discards);
  4836. }
  4837. static void count_port_inactive(struct hfi1_devdata *dd)
  4838. {
  4839. __count_port_discards(dd->pport);
  4840. }
  4841. /*
  4842. * We have had a "disallowed packet" error during egress. Determine the
4843. * integrity check which failed, and update the relevant error counter, etc.
  4844. *
  4845. * Note that the SEND_EGRESS_ERR_INFO register has only a single
  4846. * bit of state per integrity check, and so we can miss the reason for an
  4847. * egress error if more than one packet fails the same integrity check
  4848. * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
  4849. */
  4850. static void handle_send_egress_err_info(struct hfi1_devdata *dd,
  4851. int vl)
  4852. {
  4853. struct hfi1_pportdata *ppd = dd->pport;
  4854. u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
  4855. u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
  4856. char buf[96];
  4857. /* clear down all observed info as quickly as possible after read */
  4858. write_csr(dd, SEND_EGRESS_ERR_INFO, info);
  4859. dd_dev_info(dd,
  4860. "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
  4861. info, egress_err_info_string(buf, sizeof(buf), info), src);
  4862. /* Eventually add other counters for each bit */
  4863. if (info & PORT_DISCARD_EGRESS_ERRS) {
  4864. int weight, i;
  4865. /*
  4866. * Count all applicable bits as individual errors and
  4867. * attribute them to the packet that triggered this handler.
  4868. * This may not be completely accurate due to limitations
  4869. * on the available hardware error information. There is
  4870. * a single information register and any number of error
  4871. * packets may have occurred and contributed to it before
  4872. * this routine is called. This means that:
  4873. * a) If multiple packets with the same error occur before
  4874. * this routine is called, earlier packets are missed.
  4875. * There is only a single bit for each error type.
  4876. * b) Errors may not be attributed to the correct VL.
  4877. * The driver is attributing all bits in the info register
  4878. * to the packet that triggered this call, but bits
  4879. * could be an accumulation of different packets with
  4880. * different VLs.
  4881. * c) A single error packet may have multiple counts attached
  4882. * to it. There is no way for the driver to know if
  4883. * multiple bits set in the info register are due to a
  4884. * single packet or multiple packets. The driver assumes
  4885. * multiple packets.
  4886. */
  4887. weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
  4888. for (i = 0; i < weight; i++) {
  4889. __count_port_discards(ppd);
  4890. if (vl >= 0 && vl < TXE_NUM_DATA_VL)
  4891. incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
  4892. else if (vl == 15)
  4893. incr_cntr64(&ppd->port_xmit_discards_vl
  4894. [C_VL_15]);
  4895. }
  4896. }
  4897. }
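/*
 * Worked example for the discard accounting above: if three of the
 * PORT_DISCARD_EGRESS_ERRS bits are set in the info register, hweight64()
 * returns 3 and port_xmit_discards is incremented three times, with each
 * increment also attributed to the given VL (or to C_VL_15 when vl == 15),
 * subject to the attribution caveats listed in the comment above.
 */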
  4898. /*
  4899. * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
  4900. * register. Does it represent a 'port inactive' error?
  4901. */
  4902. static inline int port_inactive_err(u64 posn)
  4903. {
  4904. return (posn >= SEES(TX_LINKDOWN) &&
  4905. posn <= SEES(TX_INCORRECT_LINK_STATE));
  4906. }
  4907. /*
  4908. * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
  4909. * register. Does it represent a 'disallowed packet' error?
  4910. */
  4911. static inline int disallowed_pkt_err(int posn)
  4912. {
  4913. return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
  4914. posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
  4915. }
  4916. /*
  4917. * Input value is a bit position of one of the SDMA engine disallowed
  4918. * packet errors. Return which engine. Use of this must be guarded by
  4919. * disallowed_pkt_err().
  4920. */
  4921. static inline int disallowed_pkt_engine(int posn)
  4922. {
  4923. return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
  4924. }
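/*
 * Example of the two helpers above: a bit position equal to
 * SEES(TX_SDMA3_DISALLOWED_PACKET) satisfies disallowed_pkt_err(), and
 * disallowed_pkt_engine() maps it to engine 3.
 */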
  4925. /*
4926. * Translate an SDMA engine to a VL. Return -1 if the translation cannot
  4927. * be done.
  4928. */
  4929. static int engine_to_vl(struct hfi1_devdata *dd, int engine)
  4930. {
  4931. struct sdma_vl_map *m;
  4932. int vl;
  4933. /* range check */
  4934. if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
  4935. return -1;
  4936. rcu_read_lock();
  4937. m = rcu_dereference(dd->sdma_map);
  4938. vl = m->engine_to_vl[engine];
  4939. rcu_read_unlock();
  4940. return vl;
  4941. }
  4942. /*
4943. * Translate the send context (software index) into a VL. Return -1 if the
  4944. * translation cannot be done.
  4945. */
  4946. static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
  4947. {
  4948. struct send_context_info *sci;
  4949. struct send_context *sc;
  4950. int i;
  4951. sci = &dd->send_contexts[sw_index];
  4952. /* there is no information for user (PSM) and ack contexts */
  4953. if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
  4954. return -1;
  4955. sc = sci->sc;
  4956. if (!sc)
  4957. return -1;
  4958. if (dd->vld[15].sc == sc)
  4959. return 15;
  4960. for (i = 0; i < num_vls; i++)
  4961. if (dd->vld[i].sc == sc)
  4962. return i;
  4963. return -1;
  4964. }
  4965. static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4966. {
  4967. u64 reg_copy = reg, handled = 0;
  4968. char buf[96];
  4969. int i = 0;
  4970. if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
  4971. start_freeze_handling(dd->pport, 0);
  4972. else if (is_ax(dd) &&
  4973. (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
  4974. (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
  4975. start_freeze_handling(dd->pport, 0);
  4976. while (reg_copy) {
  4977. int posn = fls64(reg_copy);
  4978. /* fls64() returns a 1-based offset, we want it zero based */
  4979. int shift = posn - 1;
  4980. u64 mask = 1ULL << shift;
  4981. if (port_inactive_err(shift)) {
  4982. count_port_inactive(dd);
  4983. handled |= mask;
  4984. } else if (disallowed_pkt_err(shift)) {
  4985. int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
  4986. handle_send_egress_err_info(dd, vl);
  4987. handled |= mask;
  4988. }
  4989. reg_copy &= ~mask;
  4990. }
  4991. reg &= ~handled;
  4992. if (reg)
  4993. dd_dev_info(dd, "Egress Error: %s\n",
  4994. egress_err_status_string(buf, sizeof(buf), reg));
  4995. for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
  4996. if (reg & (1ull << i))
  4997. incr_cntr64(&dd->send_egress_err_status_cnt[i]);
  4998. }
  4999. }
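/*
 * Worked example for the bit walk in handle_egress_err(): with
 * reg_copy == 0x9, fls64() first returns 4, so bit 3 is classified and
 * cleared, then fls64() returns 1 and bit 0 is handled; any bits not
 * recognized as port-inactive or disallowed-packet errors remain in 'reg'
 * and are reported via egress_err_status_string().
 */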
  5000. static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  5001. {
  5002. char buf[96];
  5003. int i = 0;
  5004. dd_dev_info(dd, "Send Error: %s\n",
  5005. send_err_status_string(buf, sizeof(buf), reg));
  5006. for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
  5007. if (reg & (1ull << i))
  5008. incr_cntr64(&dd->send_err_status_cnt[i]);
  5009. }
  5010. }
  5011. /*
  5012. * The maximum number of times the error clear down will loop before
  5013. * blocking a repeating error. This value is arbitrary.
  5014. */
  5015. #define MAX_CLEAR_COUNT 20
  5016. /*
  5017. * Clear and handle an error register. All error interrupts are funneled
  5018. * through here to have a central location to correctly handle single-
  5019. * or multi-shot errors.
  5020. *
  5021. * For non per-context registers, call this routine with a context value
  5022. * of 0 so the per-context offset is zero.
  5023. *
  5024. * If the handler loops too many times, assume that something is wrong
  5025. * and can't be fixed, so mask the error bits.
  5026. */
  5027. static void interrupt_clear_down(struct hfi1_devdata *dd,
  5028. u32 context,
  5029. const struct err_reg_info *eri)
  5030. {
  5031. u64 reg;
  5032. u32 count;
  5033. /* read in a loop until no more errors are seen */
  5034. count = 0;
  5035. while (1) {
  5036. reg = read_kctxt_csr(dd, context, eri->status);
  5037. if (reg == 0)
  5038. break;
  5039. write_kctxt_csr(dd, context, eri->clear, reg);
  5040. if (likely(eri->handler))
  5041. eri->handler(dd, context, reg);
  5042. count++;
  5043. if (count > MAX_CLEAR_COUNT) {
  5044. u64 mask;
  5045. dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
  5046. eri->desc, reg);
  5047. /*
  5048. * Read-modify-write so any other masked bits
  5049. * remain masked.
  5050. */
  5051. mask = read_kctxt_csr(dd, context, eri->mask);
  5052. mask &= ~reg;
  5053. write_kctxt_csr(dd, context, eri->mask, mask);
  5054. break;
  5055. }
  5056. }
  5057. }
  5058. /*
  5059. * CCE block "misc" interrupt. Source is < 16.
  5060. */
  5061. static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
  5062. {
  5063. const struct err_reg_info *eri = &misc_errs[source];
  5064. if (eri->handler) {
  5065. interrupt_clear_down(dd, 0, eri);
  5066. } else {
  5067. dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
  5068. source);
  5069. }
  5070. }
  5071. static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
  5072. {
  5073. return flag_string(buf, buf_len, flags,
  5074. sc_err_status_flags,
  5075. ARRAY_SIZE(sc_err_status_flags));
  5076. }
  5077. /*
  5078. * Send context error interrupt. Source (hw_context) is < 160.
  5079. *
  5080. * All send context errors cause the send context to halt. The normal
  5081. * clear-down mechanism cannot be used because we cannot clear the
  5082. * error bits until several other long-running items are done first.
  5083. * This is OK because with the context halted, nothing else is going
  5084. * to happen on it anyway.
  5085. */
  5086. static void is_sendctxt_err_int(struct hfi1_devdata *dd,
  5087. unsigned int hw_context)
  5088. {
  5089. struct send_context_info *sci;
  5090. struct send_context *sc;
  5091. char flags[96];
  5092. u64 status;
  5093. u32 sw_index;
  5094. int i = 0;
  5095. sw_index = dd->hw_to_sw[hw_context];
  5096. if (sw_index >= dd->num_send_contexts) {
  5097. dd_dev_err(dd,
  5098. "out of range sw index %u for send context %u\n",
  5099. sw_index, hw_context);
  5100. return;
  5101. }
  5102. sci = &dd->send_contexts[sw_index];
  5103. sc = sci->sc;
  5104. if (!sc) {
  5105. dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
  5106. sw_index, hw_context);
  5107. return;
  5108. }
  5109. /* tell the software that a halt has begun */
  5110. sc_stop(sc, SCF_HALTED);
  5111. status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
  5112. dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
  5113. send_context_err_status_string(flags, sizeof(flags),
  5114. status));
  5115. if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
  5116. handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
  5117. /*
  5118. * Automatically restart halted kernel contexts out of interrupt
  5119. * context. User contexts must ask the driver to restart the context.
  5120. */
  5121. if (sc->type != SC_USER)
  5122. queue_work(dd->pport->hfi1_wq, &sc->halt_work);
  5123. /*
  5124. * Update the counters for the corresponding status bits.
  5125. * Note that these particular counters are aggregated over all
  5126. * 160 contexts.
  5127. */
  5128. for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
  5129. if (status & (1ull << i))
  5130. incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
  5131. }
  5132. }
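/*
 * Handle an SDMA engine error: hand the status off to the SDMA error
 * handling code and update the error counters.
 */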
  5133. static void handle_sdma_eng_err(struct hfi1_devdata *dd,
  5134. unsigned int source, u64 status)
  5135. {
  5136. struct sdma_engine *sde;
  5137. int i = 0;
  5138. sde = &dd->per_sdma[source];
  5139. #ifdef CONFIG_SDMA_VERBOSITY
  5140. dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  5141. slashstrip(__FILE__), __LINE__, __func__);
  5142. dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
  5143. sde->this_idx, source, (unsigned long long)status);
  5144. #endif
  5145. sde->err_cnt++;
  5146. sdma_engine_error(sde, status);
  5147. /*
  5148. * Update the counters for the corresponding status bits.
  5149. * Note that these particular counters are aggregated over
  5150. * all 16 DMA engines.
  5151. */
  5152. for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
  5153. if (status & (1ull << i))
  5154. incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
  5155. }
  5156. }
  5157. /*
  5158. * CCE block SDMA error interrupt. Source is < 16.
  5159. */
  5160. static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
  5161. {
  5162. #ifdef CONFIG_SDMA_VERBOSITY
  5163. struct sdma_engine *sde = &dd->per_sdma[source];
  5164. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  5165. slashstrip(__FILE__), __LINE__, __func__);
  5166. dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
  5167. source);
  5168. sdma_dumpstate(sde);
  5169. #endif
  5170. interrupt_clear_down(dd, source, &sdma_eng_err);
  5171. }
  5172. /*
  5173. * CCE block "various" interrupt. Source is < 8.
  5174. */
  5175. static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
  5176. {
  5177. const struct err_reg_info *eri = &various_err[source];
  5178. /*
  5179. * TCritInt cannot go through interrupt_clear_down()
  5180. * because it is not a second tier interrupt. The handler
  5181. * should be called directly.
  5182. */
  5183. if (source == TCRIT_INT_SOURCE)
  5184. handle_temp_err(dd);
  5185. else if (eri->handler)
  5186. interrupt_clear_down(dd, 0, eri);
  5187. else
  5188. dd_dev_info(dd,
  5189. "%s: Unimplemented/reserved interrupt %d\n",
  5190. __func__, source);
  5191. }
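/*
 * Handle a QSFP interrupt: process module removal/insertion
 * (QSFP_HFI0_MODPRST_N) and module-asserted interrupts
 * (QSFP_HFI0_INT_N), then schedule the QSFP work if a cable is present.
 */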
  5192. static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
  5193. {
  5194. /* src_ctx is always zero */
  5195. struct hfi1_pportdata *ppd = dd->pport;
  5196. unsigned long flags;
  5197. u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
  5198. if (reg & QSFP_HFI0_MODPRST_N) {
  5199. if (!qsfp_mod_present(ppd)) {
  5200. dd_dev_info(dd, "%s: QSFP module removed\n",
  5201. __func__);
  5202. ppd->driver_link_ready = 0;
  5203. /*
  5204. * Cable removed, reset all our information about the
  5205. * cache and cable capabilities
  5206. */
  5207. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5208. /*
  5209. * We don't set cache_refresh_required here as we expect
  5210. * an interrupt when a cable is inserted
  5211. */
  5212. ppd->qsfp_info.cache_valid = 0;
  5213. ppd->qsfp_info.reset_needed = 0;
  5214. ppd->qsfp_info.limiting_active = 0;
  5215. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  5216. flags);
  5217. /* Invert the ModPresent pin now to detect plug-in */
  5218. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
  5219. ASIC_QSFP1_INVERT, qsfp_int_mgmt);
  5220. if ((ppd->offline_disabled_reason >
  5221. HFI1_ODR_MASK(
  5222. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
  5223. (ppd->offline_disabled_reason ==
  5224. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
  5225. ppd->offline_disabled_reason =
  5226. HFI1_ODR_MASK(
  5227. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
  5228. if (ppd->host_link_state == HLS_DN_POLL) {
  5229. /*
  5230. * The link is still in POLL. This means
  5231. * that the normal link down processing
  5232. * will not happen. We have to do it here
  5233. * before turning the DC off.
  5234. */
  5235. queue_work(ppd->hfi1_wq, &ppd->link_down_work);
  5236. }
  5237. } else {
  5238. dd_dev_info(dd, "%s: QSFP module inserted\n",
  5239. __func__);
  5240. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5241. ppd->qsfp_info.cache_valid = 0;
  5242. ppd->qsfp_info.cache_refresh_required = 1;
  5243. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  5244. flags);
  5245. /*
  5246. * Stop inversion of ModPresent pin to detect
  5247. * removal of the cable
  5248. */
  5249. qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
  5250. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
  5251. ASIC_QSFP1_INVERT, qsfp_int_mgmt);
  5252. ppd->offline_disabled_reason =
  5253. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
  5254. }
  5255. }
  5256. if (reg & QSFP_HFI0_INT_N) {
  5257. dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
  5258. __func__);
  5259. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5260. ppd->qsfp_info.check_interrupt_flags = 1;
  5261. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
  5262. }
  5263. /* Schedule the QSFP work only if there is a cable attached. */
  5264. if (qsfp_mod_present(ppd))
  5265. queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
  5266. }
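/*
 * Ask the 8051 to give the host access to the LCB CSRs.
 * Returns 0 on success, -EBUSY if the command failed.
 */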
  5267. static int request_host_lcb_access(struct hfi1_devdata *dd)
  5268. {
  5269. int ret;
  5270. ret = do_8051_command(dd, HCMD_MISC,
  5271. (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
  5272. LOAD_DATA_FIELD_ID_SHIFT, NULL);
  5273. if (ret != HCMD_SUCCESS) {
  5274. dd_dev_err(dd, "%s: command failed with error %d\n",
  5275. __func__, ret);
  5276. }
  5277. return ret == HCMD_SUCCESS ? 0 : -EBUSY;
  5278. }
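/*
 * Return LCB access to the 8051.
 * Returns 0 on success, -EBUSY if the command failed.
 */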
  5279. static int request_8051_lcb_access(struct hfi1_devdata *dd)
  5280. {
  5281. int ret;
  5282. ret = do_8051_command(dd, HCMD_MISC,
  5283. (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
  5284. LOAD_DATA_FIELD_ID_SHIFT, NULL);
  5285. if (ret != HCMD_SUCCESS) {
  5286. dd_dev_err(dd, "%s: command failed with error %d\n",
  5287. __func__, ret);
  5288. }
  5289. return ret == HCMD_SUCCESS ? 0 : -EBUSY;
  5290. }
  5291. /*
  5292. * Set the LCB selector - allow host access. The DCC selector always
  5293. * points to the host.
  5294. */
  5295. static inline void set_host_lcb_access(struct hfi1_devdata *dd)
  5296. {
  5297. write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
  5298. DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
  5299. DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
  5300. }
  5301. /*
  5302. * Clear the LCB selector - allow 8051 access. The DCC selector always
  5303. * points to the host.
  5304. */
  5305. static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
  5306. {
  5307. write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
  5308. DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
  5309. }
  5310. /*
  5311. * Acquire LCB access from the 8051. If the host already has access,
  5312. * just increment a counter. Otherwise, inform the 8051 that the
  5313. * host is taking access.
  5314. *
  5315. * Returns:
  5316. * 0 on success
  5317. * -EBUSY if the 8051 has control and cannot be disturbed
  5318. * -errno if unable to acquire access from the 8051
  5319. */
  5320. int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
  5321. {
  5322. struct hfi1_pportdata *ppd = dd->pport;
  5323. int ret = 0;
  5324. /*
  5325. * Use the host link state lock so the operation of this routine
  5326. * { link state check, selector change, count increment } can occur
  5327. * as a unit against a link state change. Otherwise there is a
  5328. * race between the state change and the count increment.
  5329. */
  5330. if (sleep_ok) {
  5331. mutex_lock(&ppd->hls_lock);
  5332. } else {
  5333. while (!mutex_trylock(&ppd->hls_lock))
  5334. udelay(1);
  5335. }
  5336. /* this access is valid only when the link is up */
  5337. if (ppd->host_link_state & HLS_DOWN) {
  5338. dd_dev_info(dd, "%s: link state %s not up\n",
  5339. __func__, link_state_name(ppd->host_link_state));
  5340. ret = -EBUSY;
  5341. goto done;
  5342. }
  5343. if (dd->lcb_access_count == 0) {
  5344. ret = request_host_lcb_access(dd);
  5345. if (ret) {
  5346. dd_dev_err(dd,
  5347. "%s: unable to acquire LCB access, err %d\n",
  5348. __func__, ret);
  5349. goto done;
  5350. }
  5351. set_host_lcb_access(dd);
  5352. }
  5353. dd->lcb_access_count++;
  5354. done:
  5355. mutex_unlock(&ppd->hls_lock);
  5356. return ret;
  5357. }
  5358. /*
  5359. * Release LCB access by decrementing the use count. If the count is moving
5360. * from 1 to 0, inform the 8051 that it has control back.
  5361. *
  5362. * Returns:
  5363. * 0 on success
  5364. * -errno if unable to release access to the 8051
  5365. */
  5366. int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
  5367. {
  5368. int ret = 0;
  5369. /*
  5370. * Use the host link state lock because the acquire needed it.
  5371. * Here, we only need to keep { selector change, count decrement }
  5372. * as a unit.
  5373. */
  5374. if (sleep_ok) {
  5375. mutex_lock(&dd->pport->hls_lock);
  5376. } else {
  5377. while (!mutex_trylock(&dd->pport->hls_lock))
  5378. udelay(1);
  5379. }
  5380. if (dd->lcb_access_count == 0) {
  5381. dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
  5382. __func__);
  5383. goto done;
  5384. }
  5385. if (dd->lcb_access_count == 1) {
  5386. set_8051_lcb_access(dd);
  5387. ret = request_8051_lcb_access(dd);
  5388. if (ret) {
  5389. dd_dev_err(dd,
  5390. "%s: unable to release LCB access, err %d\n",
  5391. __func__, ret);
  5392. /* restore host access if the grant didn't work */
  5393. set_host_lcb_access(dd);
  5394. goto done;
  5395. }
  5396. }
  5397. dd->lcb_access_count--;
  5398. done:
  5399. mutex_unlock(&dd->pport->hls_lock);
  5400. return ret;
  5401. }
  5402. /*
  5403. * Initialize LCB access variables and state. Called during driver load,
  5404. * after most of the initialization is finished.
  5405. *
  5406. * The DC default is LCB access on for the host. The driver defaults to
  5407. * leaving access to the 8051. Assign access now - this constrains the call
  5408. * to this routine to be after all LCB set-up is done. In particular, after
5409. * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
  5410. */
  5411. static void init_lcb_access(struct hfi1_devdata *dd)
  5412. {
  5413. dd->lcb_access_count = 0;
  5414. }
  5415. /*
5416. * Write a response back to an 8051 request.
  5417. */
  5418. static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
  5419. {
  5420. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
  5421. DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
  5422. (u64)return_code <<
  5423. DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
  5424. (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
  5425. }
  5426. /*
  5427. * Handle host requests from the 8051.
  5428. */
  5429. static void handle_8051_request(struct hfi1_pportdata *ppd)
  5430. {
  5431. struct hfi1_devdata *dd = ppd->dd;
  5432. u64 reg;
  5433. u16 data = 0;
  5434. u8 type;
  5435. reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
  5436. if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
  5437. return; /* no request */
  5438. /* zero out COMPLETED so the response is seen */
  5439. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
  5440. /* extract request details */
  5441. type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
  5442. & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
  5443. data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
  5444. & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
  5445. switch (type) {
  5446. case HREQ_LOAD_CONFIG:
  5447. case HREQ_SAVE_CONFIG:
  5448. case HREQ_READ_CONFIG:
  5449. case HREQ_SET_TX_EQ_ABS:
  5450. case HREQ_SET_TX_EQ_REL:
  5451. case HREQ_ENABLE:
  5452. dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
  5453. type);
  5454. hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
  5455. break;
  5456. case HREQ_CONFIG_DONE:
  5457. hreq_response(dd, HREQ_SUCCESS, 0);
  5458. break;
  5459. case HREQ_INTERFACE_TEST:
  5460. hreq_response(dd, HREQ_SUCCESS, data);
  5461. break;
  5462. default:
  5463. dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
  5464. hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
  5465. break;
  5466. }
  5467. }
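/*
 * Write SEND_CM_GLOBAL_CREDIT: the total credit limit, the shared
 * credit limit, and the allocation unit (vAU) code.
 */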
  5468. static void write_global_credit(struct hfi1_devdata *dd,
  5469. u8 vau, u16 total, u16 shared)
  5470. {
  5471. write_csr(dd, SEND_CM_GLOBAL_CREDIT,
  5472. ((u64)total <<
  5473. SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
  5474. ((u64)shared <<
  5475. SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
  5476. ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
  5477. }
  5478. /*
  5479. * Set up initial VL15 credits of the remote. Assumes the rest of
5480. * the CM credit registers are zero from a previous global or credit reset.
  5481. */
  5482. void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
  5483. {
  5484. /* leave shared count at zero for both global and VL15 */
  5485. write_global_credit(dd, vau, vl15buf, 0);
  5486. /* We may need some credits for another VL when sending packets
  5487. * with the snoop interface. Dividing it down the middle for VL15
  5488. * and VL0 should suffice.
  5489. */
  5490. if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
  5491. write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
  5492. << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
  5493. write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
  5494. << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
  5495. } else {
  5496. write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
  5497. << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
  5498. }
  5499. }
  5500. /*
  5501. * Zero all credit details from the previous connection and
  5502. * reset the CM manager's internal counters.
  5503. */
  5504. void reset_link_credits(struct hfi1_devdata *dd)
  5505. {
  5506. int i;
  5507. /* remove all previous VL credit limits */
  5508. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  5509. write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
  5510. write_csr(dd, SEND_CM_CREDIT_VL15, 0);
  5511. write_global_credit(dd, 0, 0, 0);
  5512. /* reset the CM block */
  5513. pio_send_control(dd, PSC_CM_RESET);
  5514. }
  5515. /* convert a vCU to a CU */
  5516. static u32 vcu_to_cu(u8 vcu)
  5517. {
  5518. return 1 << vcu;
  5519. }
  5520. /* convert a CU to a vCU */
  5521. static u8 cu_to_vcu(u32 cu)
  5522. {
  5523. return ilog2(cu);
  5524. }
  5525. /* convert a vAU to an AU */
  5526. static u32 vau_to_au(u8 vau)
  5527. {
  5528. return 8 * (1 << vau);
  5529. }
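/* restore the SM trap QP and SA QP defaults used once the link is up */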
  5530. static void set_linkup_defaults(struct hfi1_pportdata *ppd)
  5531. {
  5532. ppd->sm_trap_qp = 0x0;
  5533. ppd->sa_qp = 0x1;
  5534. }
  5535. /*
  5536. * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
  5537. */
  5538. static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
  5539. {
  5540. u64 reg;
  5541. /* clear lcb run: LCB_CFG_RUN.EN = 0 */
  5542. write_csr(dd, DC_LCB_CFG_RUN, 0);
  5543. /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
  5544. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
  5545. 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
  5546. /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
  5547. dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
  5548. reg = read_csr(dd, DCC_CFG_RESET);
  5549. write_csr(dd, DCC_CFG_RESET, reg |
  5550. (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
  5551. (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
  5552. (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
  5553. if (!abort) {
  5554. udelay(1); /* must hold for the longer of 16cclks or 20ns */
  5555. write_csr(dd, DCC_CFG_RESET, reg);
  5556. write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
  5557. }
  5558. }
  5559. /*
  5560. * This routine should be called after the link has been transitioned to
  5561. * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
  5562. * reset).
  5563. *
  5564. * The expectation is that the caller of this routine would have taken
  5565. * care of properly transitioning the link into the correct state.
  5566. */
  5567. static void dc_shutdown(struct hfi1_devdata *dd)
  5568. {
  5569. unsigned long flags;
  5570. spin_lock_irqsave(&dd->dc8051_lock, flags);
  5571. if (dd->dc_shutdown) {
  5572. spin_unlock_irqrestore(&dd->dc8051_lock, flags);
  5573. return;
  5574. }
  5575. dd->dc_shutdown = 1;
  5576. spin_unlock_irqrestore(&dd->dc8051_lock, flags);
  5577. /* Shutdown the LCB */
  5578. lcb_shutdown(dd, 1);
  5579. /*
5580. * Going to OFFLINE would have caused the 8051 to put the
5581. * SerDes into reset already. Just need to shut down the 8051
5582. * itself.
  5583. */
  5584. write_csr(dd, DC_DC8051_CFG_RST, 0x1);
  5585. }
  5586. /*
  5587. * Calling this after the DC has been brought out of reset should not
  5588. * do any damage.
  5589. */
  5590. static void dc_start(struct hfi1_devdata *dd)
  5591. {
  5592. unsigned long flags;
  5593. int ret;
  5594. spin_lock_irqsave(&dd->dc8051_lock, flags);
  5595. if (!dd->dc_shutdown)
  5596. goto done;
  5597. spin_unlock_irqrestore(&dd->dc8051_lock, flags);
  5598. /* Take the 8051 out of reset */
  5599. write_csr(dd, DC_DC8051_CFG_RST, 0ull);
  5600. /* Wait until 8051 is ready */
  5601. ret = wait_fm_ready(dd, TIMEOUT_8051_START);
  5602. if (ret) {
  5603. dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
  5604. __func__);
  5605. }
  5606. /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
  5607. write_csr(dd, DCC_CFG_RESET, 0x10);
  5608. /* lcb_shutdown() with abort=1 does not restore these */
  5609. write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
  5610. spin_lock_irqsave(&dd->dc8051_lock, flags);
  5611. dd->dc_shutdown = 0;
  5612. done:
  5613. spin_unlock_irqrestore(&dd->dc8051_lock, flags);
  5614. }
  5615. /*
  5616. * These LCB adjustments are for the Aurora SerDes core in the FPGA.
  5617. */
  5618. static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
  5619. {
  5620. u64 rx_radr, tx_radr;
  5621. u32 version;
  5622. if (dd->icode != ICODE_FPGA_EMULATION)
  5623. return;
  5624. /*
  5625. * These LCB defaults on emulator _s are good, nothing to do here:
  5626. * LCB_CFG_TX_FIFOS_RADR
  5627. * LCB_CFG_RX_FIFOS_RADR
  5628. * LCB_CFG_LN_DCLK
  5629. * LCB_CFG_IGNORE_LOST_RCLK
  5630. */
  5631. if (is_emulator_s(dd))
  5632. return;
  5633. /* else this is _p */
  5634. version = emulator_rev(dd);
  5635. if (!is_ax(dd))
  5636. version = 0x2d; /* all B0 use 0x2d or higher settings */
  5637. if (version <= 0x12) {
  5638. /* release 0x12 and below */
  5639. /*
  5640. * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
  5641. * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
  5642. * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
  5643. */
  5644. rx_radr =
  5645. 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5646. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5647. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5648. /*
  5649. * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
  5650. * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
  5651. */
  5652. tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5653. } else if (version <= 0x18) {
  5654. /* release 0x13 up to 0x18 */
  5655. /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
  5656. rx_radr =
  5657. 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5658. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5659. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5660. tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5661. } else if (version == 0x19) {
  5662. /* release 0x19 */
  5663. /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
  5664. rx_radr =
  5665. 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5666. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5667. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5668. tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5669. } else if (version == 0x1a) {
  5670. /* release 0x1a */
  5671. /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
  5672. rx_radr =
  5673. 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5674. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5675. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5676. tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5677. write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
  5678. } else {
  5679. /* release 0x1b and higher */
  5680. /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
  5681. rx_radr =
  5682. 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5683. | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5684. | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5685. tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5686. }
  5687. write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
  5688. /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
  5689. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
  5690. DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
  5691. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
  5692. }
  5693. /*
5694. * Handle an SMA idle message
  5695. *
  5696. * This is a work-queue function outside of the interrupt.
  5697. */
  5698. void handle_sma_message(struct work_struct *work)
  5699. {
  5700. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5701. sma_message_work);
  5702. struct hfi1_devdata *dd = ppd->dd;
  5703. u64 msg;
  5704. int ret;
  5705. /*
  5706. * msg is bytes 1-4 of the 40-bit idle message - the command code
  5707. * is stripped off
  5708. */
  5709. ret = read_idle_sma(dd, &msg);
  5710. if (ret)
  5711. return;
  5712. dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
  5713. /*
  5714. * React to the SMA message. Byte[1] (0 for us) is the command.
  5715. */
  5716. switch (msg & 0xff) {
  5717. case SMA_IDLE_ARM:
  5718. /*
  5719. * See OPAv1 table 9-14 - HFI and External Switch Ports Key
  5720. * State Transitions
  5721. *
  5722. * Only expected in INIT or ARMED, discard otherwise.
  5723. */
  5724. if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
  5725. ppd->neighbor_normal = 1;
  5726. break;
  5727. case SMA_IDLE_ACTIVE:
  5728. /*
  5729. * See OPAv1 table 9-14 - HFI and External Switch Ports Key
  5730. * State Transitions
  5731. *
  5732. * Can activate the node. Discard otherwise.
  5733. */
  5734. if (ppd->host_link_state == HLS_UP_ARMED &&
  5735. ppd->is_active_optimize_enabled) {
  5736. ppd->neighbor_normal = 1;
  5737. ret = set_link_state(ppd, HLS_UP_ACTIVE);
  5738. if (ret)
  5739. dd_dev_err(
  5740. dd,
  5741. "%s: received Active SMA idle message, couldn't set link to Active\n",
  5742. __func__);
  5743. }
  5744. break;
  5745. default:
  5746. dd_dev_err(dd,
  5747. "%s: received unexpected SMA idle message 0x%llx\n",
  5748. __func__, msg);
  5749. break;
  5750. }
  5751. }
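/*
 * Read-modify-write RCV_CTRL under the rcvctrl lock: set the "add"
 * bits and clear the "clear" bits.
 */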
  5752. static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
  5753. {
  5754. u64 rcvctrl;
  5755. unsigned long flags;
  5756. spin_lock_irqsave(&dd->rcvctrl_lock, flags);
  5757. rcvctrl = read_csr(dd, RCV_CTRL);
  5758. rcvctrl |= add;
  5759. rcvctrl &= ~clear;
  5760. write_csr(dd, RCV_CTRL, rcvctrl);
  5761. spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
  5762. }
  5763. static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
  5764. {
  5765. adjust_rcvctrl(dd, add, 0);
  5766. }
  5767. static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
  5768. {
  5769. adjust_rcvctrl(dd, 0, clear);
  5770. }
  5771. /*
  5772. * Called from all interrupt handlers to start handling an SPC freeze.
  5773. */
  5774. void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
  5775. {
  5776. struct hfi1_devdata *dd = ppd->dd;
  5777. struct send_context *sc;
  5778. int i;
  5779. if (flags & FREEZE_SELF)
  5780. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
  5781. /* enter frozen mode */
  5782. dd->flags |= HFI1_FROZEN;
  5783. /* notify all SDMA engines that they are going into a freeze */
  5784. sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
  5785. /* do halt pre-handling on all enabled send contexts */
  5786. for (i = 0; i < dd->num_send_contexts; i++) {
  5787. sc = dd->send_contexts[i].sc;
  5788. if (sc && (sc->flags & SCF_ENABLED))
  5789. sc_stop(sc, SCF_FROZEN | SCF_HALTED);
  5790. }
5791. /* Send contexts are frozen. Notify user space */
  5792. hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
  5793. if (flags & FREEZE_ABORT) {
  5794. dd_dev_err(dd,
  5795. "Aborted freeze recovery. Please REBOOT system\n");
  5796. return;
  5797. }
  5798. /* queue non-interrupt handler */
  5799. queue_work(ppd->hfi1_wq, &ppd->freeze_work);
  5800. }
  5801. /*
  5802. * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
  5803. * depending on the "freeze" parameter.
  5804. *
  5805. * No need to return an error if it times out, our only option
  5806. * is to proceed anyway.
  5807. */
  5808. static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
  5809. {
  5810. unsigned long timeout;
  5811. u64 reg;
  5812. timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
  5813. while (1) {
  5814. reg = read_csr(dd, CCE_STATUS);
  5815. if (freeze) {
  5816. /* waiting until all indicators are set */
  5817. if ((reg & ALL_FROZE) == ALL_FROZE)
  5818. return; /* all done */
  5819. } else {
  5820. /* waiting until all indicators are clear */
  5821. if ((reg & ALL_FROZE) == 0)
  5822. return; /* all done */
  5823. }
  5824. if (time_after(jiffies, timeout)) {
  5825. dd_dev_err(dd,
  5826. "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
  5827. freeze ? "" : "un", reg & ALL_FROZE,
  5828. freeze ? ALL_FROZE : 0ull);
  5829. return;
  5830. }
  5831. usleep_range(80, 120);
  5832. }
  5833. }
  5834. /*
  5835. * Do all freeze handling for the RXE block.
  5836. */
  5837. static void rxe_freeze(struct hfi1_devdata *dd)
  5838. {
  5839. int i;
  5840. /* disable port */
  5841. clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  5842. /* disable all receive contexts */
  5843. for (i = 0; i < dd->num_rcv_contexts; i++)
  5844. hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
  5845. }
  5846. /*
  5847. * Unfreeze handling for the RXE block - kernel contexts only.
  5848. * This will also enable the port. User contexts will do unfreeze
  5849. * handling on a per-context basis as they call into the driver.
  5850. *
  5851. */
  5852. static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
  5853. {
  5854. u32 rcvmask;
  5855. int i;
  5856. /* enable all kernel contexts */
  5857. for (i = 0; i < dd->n_krcv_queues; i++) {
  5858. rcvmask = HFI1_RCVCTRL_CTXT_ENB;
  5859. /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
  5860. rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
  5861. HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
  5862. hfi1_rcvctrl(dd, rcvmask, i);
  5863. }
  5864. /* enable port */
  5865. add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  5866. }
  5867. /*
  5868. * Non-interrupt SPC freeze handling.
  5869. *
  5870. * This is a work-queue function outside of the triggering interrupt.
  5871. */
  5872. void handle_freeze(struct work_struct *work)
  5873. {
  5874. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5875. freeze_work);
  5876. struct hfi1_devdata *dd = ppd->dd;
  5877. /* wait for freeze indicators on all affected blocks */
  5878. wait_for_freeze_status(dd, 1);
  5879. /* SPC is now frozen */
  5880. /* do send PIO freeze steps */
  5881. pio_freeze(dd);
  5882. /* do send DMA freeze steps */
  5883. sdma_freeze(dd);
  5884. /* do send egress freeze steps - nothing to do */
  5885. /* do receive freeze steps */
  5886. rxe_freeze(dd);
  5887. /*
  5888. * Unfreeze the hardware - clear the freeze, wait for each
  5889. * block's frozen bit to clear, then clear the frozen flag.
  5890. */
  5891. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
  5892. wait_for_freeze_status(dd, 0);
  5893. if (is_ax(dd)) {
  5894. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
  5895. wait_for_freeze_status(dd, 1);
  5896. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
  5897. wait_for_freeze_status(dd, 0);
  5898. }
  5899. /* do send PIO unfreeze steps for kernel contexts */
  5900. pio_kernel_unfreeze(dd);
  5901. /* do send DMA unfreeze steps */
  5902. sdma_unfreeze(dd);
  5903. /* do send egress unfreeze steps - nothing to do */
  5904. /* do receive unfreeze steps for kernel contexts */
  5905. rxe_kernel_unfreeze(dd);
  5906. /*
  5907. * The unfreeze procedure touches global device registers when
  5908. * it disables and re-enables RXE. Mark the device unfrozen
  5909. * after all that is done so other parts of the driver waiting
  5910. * for the device to unfreeze don't do things out of order.
  5911. *
5912. * The above implies that the meaning of the HFI1_FROZEN flag is
  5913. * "Device has gone into freeze mode and freeze mode handling
  5914. * is still in progress."
  5915. *
  5916. * The flag will be removed when freeze mode processing has
  5917. * completed.
  5918. */
  5919. dd->flags &= ~HFI1_FROZEN;
  5920. wake_up(&dd->event_queue);
  5921. /* no longer frozen */
  5922. }
  5923. /*
  5924. * Handle a link up interrupt from the 8051.
  5925. *
  5926. * This is a work-queue function outside of the interrupt.
  5927. */
  5928. void handle_link_up(struct work_struct *work)
  5929. {
  5930. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5931. link_up_work);
  5932. set_link_state(ppd, HLS_UP_INIT);
  5933. /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
  5934. read_ltp_rtt(ppd->dd);
  5935. /*
  5936. * OPA specifies that certain counters are cleared on a transition
  5937. * to link up, so do that.
  5938. */
  5939. clear_linkup_counters(ppd->dd);
  5940. /*
  5941. * And (re)set link up default values.
  5942. */
  5943. set_linkup_defaults(ppd);
  5944. /* enforce link speed enabled */
  5945. if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
  5946. /* oops - current speed is not enabled, bounce */
  5947. dd_dev_err(ppd->dd,
  5948. "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
  5949. ppd->link_speed_active, ppd->link_speed_enabled);
  5950. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
  5951. OPA_LINKDOWN_REASON_SPEED_POLICY);
  5952. set_link_state(ppd, HLS_DN_OFFLINE);
  5953. tune_serdes(ppd);
  5954. start_link(ppd);
  5955. }
  5956. }
  5957. /*
  5958. * Several pieces of LNI information were cached for SMA in ppd.
  5959. * Reset these on link down
  5960. */
  5961. static void reset_neighbor_info(struct hfi1_pportdata *ppd)
  5962. {
  5963. ppd->neighbor_guid = 0;
  5964. ppd->neighbor_port_number = 0;
  5965. ppd->neighbor_type = 0;
  5966. ppd->neighbor_fm_security = 0;
  5967. }
  5968. static const char * const link_down_reason_strs[] = {
  5969. [OPA_LINKDOWN_REASON_NONE] = "None",
5970. [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
  5971. [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
  5972. [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
  5973. [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
  5974. [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
  5975. [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
  5976. [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
  5977. [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
  5978. [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
  5979. [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
  5980. [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
  5981. [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
5982. [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt VL15",
  5983. [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
  5984. [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
  5985. [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
  5986. [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
  5987. [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
  5988. [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
  5989. [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
  5990. [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
  5991. [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
  5992. [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
  5993. [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
  5994. [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
  5995. [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
  5996. [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
  5997. [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
  5998. [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
  5999. [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
  6000. [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
  6001. [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
  6002. "Excessive buffer overrun",
  6003. [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
  6004. [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
  6005. [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
  6006. [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
  6007. [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
  6008. [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
  6009. [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
  6010. [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
  6011. "Local media not installed",
  6012. [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
  6013. [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
  6014. [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
  6015. "End to end not installed",
  6016. [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
  6017. [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
  6018. [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
  6019. [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
  6020. [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
  6021. [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
  6022. };
  6023. /* return the neighbor link down reason string */
  6024. static const char *link_down_reason_str(u8 reason)
  6025. {
  6026. const char *str = NULL;
  6027. if (reason < ARRAY_SIZE(link_down_reason_strs))
  6028. str = link_down_reason_strs[reason];
  6029. if (!str)
  6030. str = "(invalid)";
  6031. return str;
  6032. }
  6033. /*
  6034. * Handle a link down interrupt from the 8051.
  6035. *
  6036. * This is a work-queue function outside of the interrupt.
  6037. */
  6038. void handle_link_down(struct work_struct *work)
  6039. {
  6040. u8 lcl_reason, neigh_reason = 0;
  6041. u8 link_down_reason;
  6042. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6043. link_down_work);
  6044. int was_up;
  6045. static const char ldr_str[] = "Link down reason: ";
  6046. if ((ppd->host_link_state &
  6047. (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
  6048. ppd->port_type == PORT_TYPE_FIXED)
  6049. ppd->offline_disabled_reason =
  6050. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
  6051. /* Go offline first, then deal with reading/writing through 8051 */
  6052. was_up = !!(ppd->host_link_state & HLS_UP);
  6053. set_link_state(ppd, HLS_DN_OFFLINE);
  6054. if (was_up) {
  6055. lcl_reason = 0;
  6056. /* link down reason is only valid if the link was up */
  6057. read_link_down_reason(ppd->dd, &link_down_reason);
  6058. switch (link_down_reason) {
  6059. case LDR_LINK_TRANSFER_ACTIVE_LOW:
  6060. /* the link went down, no idle message reason */
  6061. dd_dev_info(ppd->dd, "%sUnexpected link down\n",
  6062. ldr_str);
  6063. break;
  6064. case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
  6065. /*
  6066. * The neighbor reason is only valid if an idle message
  6067. * was received for it.
  6068. */
  6069. read_planned_down_reason_code(ppd->dd, &neigh_reason);
  6070. dd_dev_info(ppd->dd,
  6071. "%sNeighbor link down message %d, %s\n",
  6072. ldr_str, neigh_reason,
  6073. link_down_reason_str(neigh_reason));
  6074. break;
  6075. case LDR_RECEIVED_HOST_OFFLINE_REQ:
  6076. dd_dev_info(ppd->dd,
  6077. "%sHost requested link to go offline\n",
  6078. ldr_str);
  6079. break;
  6080. default:
  6081. dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
  6082. ldr_str, link_down_reason);
  6083. break;
  6084. }
  6085. /*
  6086. * If no reason, assume peer-initiated but missed
  6087. * LinkGoingDown idle flits.
  6088. */
  6089. if (neigh_reason == 0)
  6090. lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
  6091. } else {
  6092. /* went down while polling or going up */
  6093. lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
  6094. }
  6095. set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
  6096. /* inform the SMA when the link transitions from up to down */
  6097. if (was_up && ppd->local_link_down_reason.sma == 0 &&
  6098. ppd->neigh_link_down_reason.sma == 0) {
  6099. ppd->local_link_down_reason.sma =
  6100. ppd->local_link_down_reason.latest;
  6101. ppd->neigh_link_down_reason.sma =
  6102. ppd->neigh_link_down_reason.latest;
  6103. }
  6104. reset_neighbor_info(ppd);
  6105. /* disable the port */
  6106. clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  6107. /*
  6108. * If there is no cable attached, turn the DC off. Otherwise,
  6109. * start the link bring up.
  6110. */
  6111. if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
  6112. dc_shutdown(ppd->dd);
  6113. } else {
  6114. tune_serdes(ppd);
  6115. start_link(ppd);
  6116. }
  6117. }
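/*
 * Handle a link bounce request: take the link offline and restart it,
 * but only if it is currently up.
 *
 * This is a work-queue function outside of the interrupt.
 */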
  6118. void handle_link_bounce(struct work_struct *work)
  6119. {
  6120. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6121. link_bounce_work);
  6122. /*
  6123. * Only do something if the link is currently up.
  6124. */
  6125. if (ppd->host_link_state & HLS_UP) {
  6126. set_link_state(ppd, HLS_DN_OFFLINE);
  6127. tune_serdes(ppd);
  6128. start_link(ppd);
  6129. } else {
  6130. dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
  6131. __func__, link_state_name(ppd->host_link_state));
  6132. }
  6133. }
  6134. /*
  6135. * Mask conversion: Capability exchange to Port LTP. The capability
  6136. * exchange has an implicit 16b CRC that is mandatory.
  6137. */
  6138. static int cap_to_port_ltp(int cap)
  6139. {
  6140. int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
  6141. if (cap & CAP_CRC_14B)
  6142. port_ltp |= PORT_LTP_CRC_MODE_14;
  6143. if (cap & CAP_CRC_48B)
  6144. port_ltp |= PORT_LTP_CRC_MODE_48;
  6145. if (cap & CAP_CRC_12B_16B_PER_LANE)
  6146. port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
  6147. return port_ltp;
  6148. }
  6149. /*
  6150. * Convert an OPA Port LTP mask to capability mask
  6151. */
  6152. int port_ltp_to_cap(int port_ltp)
  6153. {
  6154. int cap_mask = 0;
  6155. if (port_ltp & PORT_LTP_CRC_MODE_14)
  6156. cap_mask |= CAP_CRC_14B;
  6157. if (port_ltp & PORT_LTP_CRC_MODE_48)
  6158. cap_mask |= CAP_CRC_48B;
  6159. if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
  6160. cap_mask |= CAP_CRC_12B_16B_PER_LANE;
  6161. return cap_mask;
  6162. }
  6163. /*
  6164. * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
  6165. */
  6166. static int lcb_to_port_ltp(int lcb_crc)
  6167. {
  6168. int port_ltp = 0;
  6169. if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
  6170. port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
  6171. else if (lcb_crc == LCB_CRC_48B)
  6172. port_ltp = PORT_LTP_CRC_MODE_48;
  6173. else if (lcb_crc == LCB_CRC_14B)
  6174. port_ltp = PORT_LTP_CRC_MODE_14;
  6175. else
  6176. port_ltp = PORT_LTP_CRC_MODE_16;
  6177. return port_ltp;
  6178. }
  6179. /*
  6180. * Our neighbor has indicated that we are allowed to act as a fabric
  6181. * manager, so place the full management partition key in the second
  6182. * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
  6183. * that we should already have the limited management partition key in
  6184. * array element 1, and also that the port is not yet up when
  6185. * add_full_mgmt_pkey() is invoked.
  6186. */
  6187. static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
  6188. {
  6189. struct hfi1_devdata *dd = ppd->dd;
6190. /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
  6191. if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
  6192. dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
  6193. __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
  6194. ppd->pkeys[2] = FULL_MGMT_P_KEY;
  6195. (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
  6196. hfi1_event_pkey_change(ppd->dd, ppd->port);
  6197. }
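/*
 * Remove the full management partition key from pkey position 2, if
 * present, then push the updated pkey table and notify of the change.
 */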
  6198. static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
  6199. {
  6200. if (ppd->pkeys[2] != 0) {
  6201. ppd->pkeys[2] = 0;
  6202. (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
  6203. hfi1_event_pkey_change(ppd->dd, ppd->port);
  6204. }
  6205. }
  6206. /*
  6207. * Convert the given link width to the OPA link width bitmask.
  6208. */
  6209. static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
  6210. {
  6211. switch (width) {
  6212. case 0:
  6213. /*
  6214. * Simulator and quick linkup do not set the width.
  6215. * Just set it to 4x without complaint.
  6216. */
  6217. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
  6218. return OPA_LINK_WIDTH_4X;
  6219. return 0; /* no lanes up */
  6220. case 1: return OPA_LINK_WIDTH_1X;
  6221. case 2: return OPA_LINK_WIDTH_2X;
  6222. case 3: return OPA_LINK_WIDTH_3X;
  6223. default:
  6224. dd_dev_info(dd, "%s: invalid width %d, using 4\n",
  6225. __func__, width);
  6226. /* fall through */
  6227. case 4: return OPA_LINK_WIDTH_4X;
  6228. }
  6229. }
  6230. /*
  6231. * Do a population count on the bottom nibble.
  6232. */
  6233. static const u8 bit_counts[16] = {
  6234. 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
  6235. };
  6236. static inline u8 nibble_to_count(u8 nibble)
  6237. {
  6238. return bit_counts[nibble & 0xf];
  6239. }
  6240. /*
  6241. * Read the active lane information from the 8051 registers and return
  6242. * their widths.
  6243. *
  6244. * Active lane information is found in these 8051 registers:
  6245. * enable_lane_tx
  6246. * enable_lane_rx
  6247. */
  6248. static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
  6249. u16 *rx_width)
  6250. {
  6251. u16 tx, rx;
  6252. u8 enable_lane_rx;
  6253. u8 enable_lane_tx;
  6254. u8 tx_polarity_inversion;
  6255. u8 rx_polarity_inversion;
  6256. u8 max_rate;
  6257. /* read the active lanes */
  6258. read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
  6259. &rx_polarity_inversion, &max_rate);
  6260. read_local_lni(dd, &enable_lane_rx);
  6261. /* convert to counts */
  6262. tx = nibble_to_count(enable_lane_tx);
  6263. rx = nibble_to_count(enable_lane_rx);
  6264. /*
  6265. * Set link_speed_active here, overriding what was set in
  6266. * handle_verify_cap(). The ASIC 8051 firmware does not correctly
  6267. * set the max_rate field in handle_verify_cap until v0.19.
  6268. */
  6269. if ((dd->icode == ICODE_RTL_SILICON) &&
  6270. (dd->dc8051_ver < dc8051_ver(0, 19))) {
  6271. /* max_rate: 0 = 12.5G, 1 = 25G */
  6272. switch (max_rate) {
  6273. case 0:
  6274. dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
  6275. break;
  6276. default:
  6277. dd_dev_err(dd,
  6278. "%s: unexpected max rate %d, using 25Gb\n",
  6279. __func__, (int)max_rate);
  6280. /* fall through */
  6281. case 1:
  6282. dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
  6283. break;
  6284. }
  6285. }
  6286. dd_dev_info(dd,
  6287. "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
  6288. enable_lane_tx, tx, enable_lane_rx, rx);
  6289. *tx_width = link_width_to_bits(dd, tx);
  6290. *rx_width = link_width_to_bits(dd, rx);
  6291. }
  6292. /*
  6293. * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
  6294. * Valid after the end of VerifyCap and during LinkUp. Does not change
  6295. * after link up. I.e. look elsewhere for downgrade information.
  6296. *
  6297. * Bits are:
  6298. * + bits [7:4] contain the number of active transmitters
  6299. * + bits [3:0] contain the number of active receivers
  6300. * These are numbers 1 through 4 and can be different values if the
  6301. * link is asymmetric.
  6302. *
  6303. * verify_cap_local_fm_link_width[0] retains its original value.
  6304. */
  6305. static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
  6306. u16 *rx_width)
  6307. {
  6308. u16 widths, tx, rx;
  6309. u8 misc_bits, local_flags;
  6310. u16 active_tx, active_rx;
  6311. read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
  6312. tx = widths >> 12;
  6313. rx = (widths >> 8) & 0xf;
  6314. *tx_width = link_width_to_bits(dd, tx);
  6315. *rx_width = link_width_to_bits(dd, rx);
  6316. /* print the active widths */
  6317. get_link_widths(dd, &active_tx, &active_rx);
  6318. }
  6319. /*
  6320. * Set ppd->link_width_active and ppd->link_width_downgrade_active using
  6321. * hardware information when the link first comes up.
  6322. *
  6323. * The link width is not available until after VerifyCap.AllFramesReceived
  6324. * (the trigger for handle_verify_cap), so this is outside that routine
  6325. * and should be called when the 8051 signals linkup.
  6326. */
  6327. void get_linkup_link_widths(struct hfi1_pportdata *ppd)
  6328. {
  6329. u16 tx_width, rx_width;
  6330. /* get end-of-LNI link widths */
  6331. get_linkup_widths(ppd->dd, &tx_width, &rx_width);
  6332. /* use tx_width as the link is supposed to be symmetric on link up */
  6333. ppd->link_width_active = tx_width;
  6334. /* link width downgrade active (LWD.A) starts out matching LW.A */
  6335. ppd->link_width_downgrade_tx_active = ppd->link_width_active;
  6336. ppd->link_width_downgrade_rx_active = ppd->link_width_active;
  6337. /* per OPA spec, on link up LWD.E resets to LWD.S */
  6338. ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6339. /* cache the active egress rate (units of 10^6 bits/sec) */
  6340. ppd->current_egress_rate = active_egress_rate(ppd);
  6341. }
  6342. /*
  6343. * Handle a verify capabilities interrupt from the 8051.
  6344. *
  6345. * This is a work-queue function outside of the interrupt.
  6346. */
  6347. void handle_verify_cap(struct work_struct *work)
  6348. {
  6349. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6350. link_vc_work);
  6351. struct hfi1_devdata *dd = ppd->dd;
  6352. u64 reg;
  6353. u8 power_management;
  6354. u8 continious;
  6355. u8 vcu;
  6356. u8 vau;
  6357. u8 z;
  6358. u16 vl15buf;
  6359. u16 link_widths;
  6360. u16 crc_mask;
  6361. u16 crc_val;
  6362. u16 device_id;
  6363. u16 active_tx, active_rx;
  6364. u8 partner_supported_crc;
  6365. u8 remote_tx_rate;
  6366. u8 device_rev;
  6367. set_link_state(ppd, HLS_VERIFY_CAP);
  6368. lcb_shutdown(dd, 0);
  6369. adjust_lcb_for_fpga_serdes(dd);
  6370. /*
  6371. * These are now valid:
  6372. * remote VerifyCap fields in the general LNI config
  6373. * CSR DC8051_STS_REMOTE_GUID
  6374. * CSR DC8051_STS_REMOTE_NODE_TYPE
  6375. * CSR DC8051_STS_REMOTE_FM_SECURITY
  6376. * CSR DC8051_STS_REMOTE_PORT_NO
  6377. */
  6378. read_vc_remote_phy(dd, &power_management, &continious);
  6379. read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
  6380. &partner_supported_crc);
  6381. read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
  6382. read_remote_device_id(dd, &device_id, &device_rev);
  6383. /*
  6384. * And the 'MgmtAllowed' information, which is exchanged during
6385. * LNI, is also available at this point.
  6386. */
  6387. read_mgmt_allowed(dd, &ppd->mgmt_allowed);
  6388. /* print the active widths */
  6389. get_link_widths(dd, &active_tx, &active_rx);
  6390. dd_dev_info(dd,
  6391. "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
  6392. (int)power_management, (int)continious);
  6393. dd_dev_info(dd,
  6394. "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
  6395. (int)vau, (int)z, (int)vcu, (int)vl15buf,
  6396. (int)partner_supported_crc);
  6397. dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
  6398. (u32)remote_tx_rate, (u32)link_widths);
  6399. dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
  6400. (u32)device_id, (u32)device_rev);
  6401. /*
  6402. * The peer vAU value just read is the peer receiver value. HFI does
  6403. * not support a transmit vAU of 0 (AU == 8). We advertised that
  6404. * with Z=1 in the fabric capabilities sent to the peer. The peer
  6405. * will see our Z=1, and, if it advertised a vAU of 0, will move its
  6406. * receive to vAU of 1 (AU == 16). Do the same here. We do not care
  6407. * about the peer Z value - our sent vAU is 3 (hardwired) and is not
  6408. * subject to the Z value exception.
  6409. */
  6410. if (vau == 0)
  6411. vau = 1;
  6412. set_up_vl15(dd, vau, vl15buf);
  6413. /* set up the LCB CRC mode */
  6414. crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
  6415. /* order is important: use the lowest bit in common */
  6416. if (crc_mask & CAP_CRC_14B)
  6417. crc_val = LCB_CRC_14B;
  6418. else if (crc_mask & CAP_CRC_48B)
  6419. crc_val = LCB_CRC_48B;
  6420. else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
  6421. crc_val = LCB_CRC_12B_16B_PER_LANE;
  6422. else
  6423. crc_val = LCB_CRC_16B;
  6424. dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
  6425. write_csr(dd, DC_LCB_CFG_CRC_MODE,
  6426. (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
  6427. /* set (14b only) or clear sideband credit */
  6428. reg = read_csr(dd, SEND_CM_CTRL);
  6429. if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
  6430. write_csr(dd, SEND_CM_CTRL,
  6431. reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
  6432. } else {
  6433. write_csr(dd, SEND_CM_CTRL,
  6434. reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
  6435. }
  6436. ppd->link_speed_active = 0; /* invalid value */
  6437. if (dd->dc8051_ver < dc8051_ver(0, 20)) {
  6438. /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
  6439. switch (remote_tx_rate) {
  6440. case 0:
  6441. ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
  6442. break;
  6443. case 1:
  6444. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6445. break;
  6446. }
  6447. } else {
  6448. /* actual rate is highest bit of the ANDed rates */
  6449. u8 rate = remote_tx_rate & ppd->local_tx_rate;
  6450. if (rate & 2)
  6451. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6452. else if (rate & 1)
  6453. ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
  6454. }
  6455. if (ppd->link_speed_active == 0) {
  6456. dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
  6457. __func__, (int)remote_tx_rate);
  6458. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6459. }
  6460. /*
  6461. * Cache the values of the supported, enabled, and active
  6462. * LTP CRC modes to return in 'portinfo' queries. But the bit
  6463. * flags that are returned in the portinfo query differ from
  6464. * what's in the link_crc_mask, crc_sizes, and crc_val
  6465. * variables. Convert these here.
  6466. */
  6467. ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
  6468. /* supported crc modes */
  6469. ppd->port_ltp_crc_mode |=
  6470. cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
  6471. /* enabled crc modes */
  6472. ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
  6473. /* active crc mode */
  6474. /* set up the remote credit return table */
  6475. assign_remote_cm_au_table(dd, vcu);
  6476. /*
  6477. * The LCB is reset on entry to handle_verify_cap(), so this must
  6478. * be applied on every link up.
  6479. *
  6480. * Adjust LCB error kill enable to kill the link if
  6481. * these RBUF errors are seen:
  6482. * REPLAY_BUF_MBE_SMASK
  6483. * FLIT_INPUT_BUF_MBE_SMASK
  6484. */
  6485. if (is_ax(dd)) { /* fixed in B0 */
  6486. reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
  6487. reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
  6488. | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
  6489. write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
  6490. }
  6491. /* pull LCB fifos out of reset - all fifo clocks must be stable */
  6492. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  6493. /* give 8051 access to the LCB CSRs */
  6494. write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
  6495. set_8051_lcb_access(dd);
  6496. ppd->neighbor_guid =
  6497. read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
  6498. ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
  6499. DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
  6500. ppd->neighbor_type =
  6501. read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
  6502. DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
  6503. ppd->neighbor_fm_security =
  6504. read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
  6505. DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
  6506. dd_dev_info(dd,
  6507. "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
  6508. ppd->neighbor_guid, ppd->neighbor_type,
  6509. ppd->mgmt_allowed, ppd->neighbor_fm_security);
  6510. if (ppd->mgmt_allowed)
  6511. add_full_mgmt_pkey(ppd);
  6512. /* tell the 8051 to go to LinkUp */
  6513. set_link_state(ppd, HLS_GOING_UP);
  6514. }
  6515. /*
  6516. * Apply the link width downgrade enabled policy against the current active
  6517. * link widths.
  6518. *
  6519. * Called when the enabled policy changes or the active link widths change.
  6520. */
  6521. void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
  6522. {
  6523. int do_bounce = 0;
  6524. int tries;
  6525. u16 lwde;
  6526. u16 tx, rx;
  6527. /* use the hls lock to avoid a race with actual link up */
  6528. tries = 0;
  6529. retry:
  6530. mutex_lock(&ppd->hls_lock);
  6531. /* only apply if the link is up */
  6532. if (ppd->host_link_state & HLS_DOWN) {
6533. /* still going up... wait and retry */
  6534. if (ppd->host_link_state & HLS_GOING_UP) {
  6535. if (++tries < 1000) {
  6536. mutex_unlock(&ppd->hls_lock);
  6537. usleep_range(100, 120); /* arbitrary */
  6538. goto retry;
  6539. }
  6540. dd_dev_err(ppd->dd,
  6541. "%s: giving up waiting for link state change\n",
  6542. __func__);
  6543. }
  6544. goto done;
  6545. }
  6546. lwde = ppd->link_width_downgrade_enabled;
  6547. if (refresh_widths) {
  6548. get_link_widths(ppd->dd, &tx, &rx);
  6549. ppd->link_width_downgrade_tx_active = tx;
  6550. ppd->link_width_downgrade_rx_active = rx;
  6551. }
  6552. if (ppd->link_width_downgrade_tx_active == 0 ||
  6553. ppd->link_width_downgrade_rx_active == 0) {
  6554. /* the 8051 reported a dead link as a downgrade */
  6555. dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
  6556. } else if (lwde == 0) {
  6557. /* downgrade is disabled */
  6558. /* bounce if not at starting active width */
  6559. if ((ppd->link_width_active !=
  6560. ppd->link_width_downgrade_tx_active) ||
  6561. (ppd->link_width_active !=
  6562. ppd->link_width_downgrade_rx_active)) {
  6563. dd_dev_err(ppd->dd,
  6564. "Link downgrade is disabled and link has downgraded, downing link\n");
  6565. dd_dev_err(ppd->dd,
  6566. " original 0x%x, tx active 0x%x, rx active 0x%x\n",
  6567. ppd->link_width_active,
  6568. ppd->link_width_downgrade_tx_active,
  6569. ppd->link_width_downgrade_rx_active);
  6570. do_bounce = 1;
  6571. }
  6572. } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
  6573. (lwde & ppd->link_width_downgrade_rx_active) == 0) {
  6574. /* Tx or Rx is outside the enabled policy */
  6575. dd_dev_err(ppd->dd,
  6576. "Link is outside of downgrade allowed, downing link\n");
  6577. dd_dev_err(ppd->dd,
  6578. " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
  6579. lwde, ppd->link_width_downgrade_tx_active,
  6580. ppd->link_width_downgrade_rx_active);
  6581. do_bounce = 1;
  6582. }
  6583. done:
  6584. mutex_unlock(&ppd->hls_lock);
  6585. if (do_bounce) {
  6586. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
  6587. OPA_LINKDOWN_REASON_WIDTH_POLICY);
  6588. set_link_state(ppd, HLS_DN_OFFLINE);
  6589. tune_serdes(ppd);
  6590. start_link(ppd);
  6591. }
  6592. }
  6593. /*
  6594. * Handle a link downgrade interrupt from the 8051.
  6595. *
  6596. * This is a work-queue function outside of the interrupt.
  6597. */
  6598. void handle_link_downgrade(struct work_struct *work)
  6599. {
  6600. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6601. link_downgrade_work);
  6602. dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
  6603. apply_link_downgrade_policy(ppd, 1);
  6604. }
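/*
 * Flag-word decoders used by the DC error handlers below.  Each helper
 * formats the set bits of a 64-bit error/message register into 'buf'
 * via flag_string() and the matching flag-name table.
 */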
  6605. static char *dcc_err_string(char *buf, int buf_len, u64 flags)
  6606. {
  6607. return flag_string(buf, buf_len, flags, dcc_err_flags,
  6608. ARRAY_SIZE(dcc_err_flags));
  6609. }
  6610. static char *lcb_err_string(char *buf, int buf_len, u64 flags)
  6611. {
  6612. return flag_string(buf, buf_len, flags, lcb_err_flags,
  6613. ARRAY_SIZE(lcb_err_flags));
  6614. }
  6615. static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
  6616. {
  6617. return flag_string(buf, buf_len, flags, dc8051_err_flags,
  6618. ARRAY_SIZE(dc8051_err_flags));
  6619. }
  6620. static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
  6621. {
  6622. return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
  6623. ARRAY_SIZE(dc8051_info_err_flags));
  6624. }
  6625. static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
  6626. {
  6627. return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
  6628. ARRAY_SIZE(dc8051_info_host_msg_flags));
  6629. }
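/*
 * Handle a DC8051 error interrupt.
 *
 * When the SET_BY_8051 flag is raised, decode
 * DC_DC8051_DBG_ERR_INFO_SET_BY_8051 into its error and host-message
 * fields, queue the matching work items (SMA message, link up, verify
 * cap, link downgrade, link down), and report anything left over.  A
 * lost 8051 heartbeat disables its own interrupt after the first report.
 */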
  6630. static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6631. {
  6632. struct hfi1_pportdata *ppd = dd->pport;
  6633. u64 info, err, host_msg;
  6634. int queue_link_down = 0;
  6635. char buf[96];
  6636. /* look at the flags */
  6637. if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
  6638. /* 8051 information set by firmware */
  6639. /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
  6640. info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
  6641. err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
  6642. & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
  6643. host_msg = (info >>
  6644. DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
  6645. & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
  6646. /*
  6647. * Handle error flags.
  6648. */
  6649. if (err & FAILED_LNI) {
  6650. /*
  6651. * LNI error indications are cleared by the 8051
  6652. * only when starting polling. Only pay attention
  6653. * to them when in the states that occur during
  6654. * LNI.
  6655. */
  6656. if (ppd->host_link_state
  6657. & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
  6658. queue_link_down = 1;
  6659. dd_dev_info(dd, "Link error: %s\n",
  6660. dc8051_info_err_string(buf,
  6661. sizeof(buf),
  6662. err &
  6663. FAILED_LNI));
  6664. }
  6665. err &= ~(u64)FAILED_LNI;
  6666. }
6667. /* unknown frames can happen during LNI, just count */
  6668. if (err & UNKNOWN_FRAME) {
  6669. ppd->unknown_frame_count++;
  6670. err &= ~(u64)UNKNOWN_FRAME;
  6671. }
  6672. if (err) {
  6673. /* report remaining errors, but do not do anything */
  6674. dd_dev_err(dd, "8051 info error: %s\n",
  6675. dc8051_info_err_string(buf, sizeof(buf),
  6676. err));
  6677. }
  6678. /*
  6679. * Handle host message flags.
  6680. */
  6681. if (host_msg & HOST_REQ_DONE) {
  6682. /*
  6683. * Presently, the driver does a busy wait for
  6684. * host requests to complete. This is only an
  6685. * informational message.
  6686. * NOTE: The 8051 clears the host message
  6687. * information *on the next 8051 command*.
  6688. * Therefore, when linkup is achieved,
  6689. * this flag will still be set.
  6690. */
  6691. host_msg &= ~(u64)HOST_REQ_DONE;
  6692. }
  6693. if (host_msg & BC_SMA_MSG) {
  6694. queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
  6695. host_msg &= ~(u64)BC_SMA_MSG;
  6696. }
  6697. if (host_msg & LINKUP_ACHIEVED) {
  6698. dd_dev_info(dd, "8051: Link up\n");
  6699. queue_work(ppd->hfi1_wq, &ppd->link_up_work);
  6700. host_msg &= ~(u64)LINKUP_ACHIEVED;
  6701. }
  6702. if (host_msg & EXT_DEVICE_CFG_REQ) {
  6703. handle_8051_request(ppd);
  6704. host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
  6705. }
  6706. if (host_msg & VERIFY_CAP_FRAME) {
  6707. queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
  6708. host_msg &= ~(u64)VERIFY_CAP_FRAME;
  6709. }
  6710. if (host_msg & LINK_GOING_DOWN) {
  6711. const char *extra = "";
  6712. /* no downgrade action needed if going down */
  6713. if (host_msg & LINK_WIDTH_DOWNGRADED) {
  6714. host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
  6715. extra = " (ignoring downgrade)";
  6716. }
  6717. dd_dev_info(dd, "8051: Link down%s\n", extra);
  6718. queue_link_down = 1;
  6719. host_msg &= ~(u64)LINK_GOING_DOWN;
  6720. }
  6721. if (host_msg & LINK_WIDTH_DOWNGRADED) {
  6722. queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
  6723. host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
  6724. }
  6725. if (host_msg) {
  6726. /* report remaining messages, but do not do anything */
  6727. dd_dev_info(dd, "8051 info host message: %s\n",
  6728. dc8051_info_host_msg_string(buf,
  6729. sizeof(buf),
  6730. host_msg));
  6731. }
  6732. reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
  6733. }
  6734. if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
  6735. /*
  6736. * Lost the 8051 heartbeat. If this happens, we
  6737. * receive constant interrupts about it. Disable
  6738. * the interrupt after the first.
  6739. */
  6740. dd_dev_err(dd, "Lost 8051 heartbeat\n");
  6741. write_csr(dd, DC_DC8051_ERR_EN,
  6742. read_csr(dd, DC_DC8051_ERR_EN) &
  6743. ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
  6744. reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
  6745. }
  6746. if (reg) {
  6747. /* report the error, but do not do anything */
  6748. dd_dev_err(dd, "8051 error: %s\n",
  6749. dc8051_err_string(buf, sizeof(buf), reg));
  6750. }
  6751. if (queue_link_down) {
  6752. /*
  6753. * if the link is already going down or disabled, do not
  6754. * queue another
  6755. */
  6756. if ((ppd->host_link_state &
  6757. (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
  6758. ppd->link_enabled == 0) {
  6759. dd_dev_info(dd, "%s: not queuing link down\n",
  6760. __func__);
  6761. } else {
  6762. queue_work(ppd->hfi1_wq, &ppd->link_down_work);
  6763. }
  6764. }
  6765. }
  6766. static const char * const fm_config_txt[] = {
  6767. [0] =
  6768. "BadHeadDist: Distance violation between two head flits",
  6769. [1] =
  6770. "BadTailDist: Distance violation between two tail flits",
  6771. [2] =
  6772. "BadCtrlDist: Distance violation between two credit control flits",
  6773. [3] =
  6774. "BadCrdAck: Credits return for unsupported VL",
  6775. [4] =
  6776. "UnsupportedVLMarker: Received VL Marker",
  6777. [5] =
  6778. "BadPreempt: Exceeded the preemption nesting level",
  6779. [6] =
  6780. "BadControlFlit: Received unsupported control flit",
  6781. /* no 7 */
  6782. [8] =
  6783. "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
  6784. };
  6785. static const char * const port_rcv_txt[] = {
  6786. [1] =
  6787. "BadPktLen: Illegal PktLen",
  6788. [2] =
  6789. "PktLenTooLong: Packet longer than PktLen",
  6790. [3] =
  6791. "PktLenTooShort: Packet shorter than PktLen",
  6792. [4] =
  6793. "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
  6794. [5] =
  6795. "BadDLID: Illegal DLID (0, doesn't match HFI)",
  6796. [6] =
  6797. "BadL2: Illegal L2 opcode",
  6798. [7] =
  6799. "BadSC: Unsupported SC",
  6800. [9] =
  6801. "BadRC: Illegal RC",
  6802. [11] =
  6803. "PreemptError: Preempting with same VL",
  6804. [12] =
  6805. "PreemptVL15: Preempting a VL15 packet",
  6806. };
  6807. #define OPA_LDR_FMCONFIG_OFFSET 16
  6808. #define OPA_LDR_PORTRCV_OFFSET 0
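/*
 * Handle a DCC error interrupt.
 *
 * Latch the first uncorrectable, fmconfig, and port receive error codes
 * for later error-info queries, log each error, and bounce the link when
 * the error is covered by the port error action mask.
 */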
  6809. static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6810. {
  6811. u64 info, hdr0, hdr1;
  6812. const char *extra;
  6813. char buf[96];
  6814. struct hfi1_pportdata *ppd = dd->pport;
  6815. u8 lcl_reason = 0;
  6816. int do_bounce = 0;
  6817. if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
  6818. if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
  6819. info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
  6820. dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
  6821. /* set status bit */
  6822. dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
  6823. }
  6824. reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
  6825. }
  6826. if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
  6827. struct hfi1_pportdata *ppd = dd->pport;
  6828. /* this counter saturates at (2^32) - 1 */
  6829. if (ppd->link_downed < (u32)UINT_MAX)
  6830. ppd->link_downed++;
  6831. reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
  6832. }
  6833. if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
  6834. u8 reason_valid = 1;
  6835. info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
  6836. if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
  6837. dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
  6838. /* set status bit */
  6839. dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
  6840. }
  6841. switch (info) {
  6842. case 0:
  6843. case 1:
  6844. case 2:
  6845. case 3:
  6846. case 4:
  6847. case 5:
  6848. case 6:
  6849. extra = fm_config_txt[info];
  6850. break;
  6851. case 8:
  6852. extra = fm_config_txt[info];
  6853. if (ppd->port_error_action &
  6854. OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
  6855. do_bounce = 1;
  6856. /*
  6857. * lcl_reason cannot be derived from info
  6858. * for this error
  6859. */
  6860. lcl_reason =
  6861. OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
  6862. }
  6863. break;
  6864. default:
  6865. reason_valid = 0;
  6866. snprintf(buf, sizeof(buf), "reserved%lld", info);
  6867. extra = buf;
  6868. break;
  6869. }
  6870. if (reason_valid && !do_bounce) {
  6871. do_bounce = ppd->port_error_action &
  6872. (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
  6873. lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
  6874. }
  6875. /* just report this */
  6876. dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
  6877. reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
  6878. }
  6879. if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
  6880. u8 reason_valid = 1;
  6881. info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
  6882. hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
  6883. hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
  6884. if (!(dd->err_info_rcvport.status_and_code &
  6885. OPA_EI_STATUS_SMASK)) {
  6886. dd->err_info_rcvport.status_and_code =
  6887. info & OPA_EI_CODE_SMASK;
  6888. /* set status bit */
  6889. dd->err_info_rcvport.status_and_code |=
  6890. OPA_EI_STATUS_SMASK;
  6891. /*
  6892. * save first 2 flits in the packet that caused
  6893. * the error
  6894. */
  6895. dd->err_info_rcvport.packet_flit1 = hdr0;
  6896. dd->err_info_rcvport.packet_flit2 = hdr1;
  6897. }
  6898. switch (info) {
  6899. case 1:
  6900. case 2:
  6901. case 3:
  6902. case 4:
  6903. case 5:
  6904. case 6:
  6905. case 7:
  6906. case 9:
  6907. case 11:
  6908. case 12:
  6909. extra = port_rcv_txt[info];
  6910. break;
  6911. default:
  6912. reason_valid = 0;
  6913. snprintf(buf, sizeof(buf), "reserved%lld", info);
  6914. extra = buf;
  6915. break;
  6916. }
  6917. if (reason_valid && !do_bounce) {
  6918. do_bounce = ppd->port_error_action &
  6919. (1 << (OPA_LDR_PORTRCV_OFFSET + info));
  6920. lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
  6921. }
  6922. /* just report this */
  6923. dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
  6924. dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
  6925. hdr0, hdr1);
  6926. reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
  6927. }
  6928. if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
  6929. /* informative only */
  6930. dd_dev_info(dd, "8051 access to LCB blocked\n");
  6931. reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
  6932. }
  6933. if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
  6934. /* informative only */
  6935. dd_dev_info(dd, "host access to LCB blocked\n");
  6936. reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
  6937. }
  6938. /* report any remaining errors */
  6939. if (reg)
  6940. dd_dev_info(dd, "DCC Error: %s\n",
  6941. dcc_err_string(buf, sizeof(buf), reg));
  6942. if (lcl_reason == 0)
  6943. lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
  6944. if (do_bounce) {
  6945. dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
  6946. set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
  6947. queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
  6948. }
  6949. }
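/*
 * Handle an LCB error interrupt.  These are informational here: the
 * flags are decoded and logged, and no recovery action is taken in this
 * handler.
 */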
  6950. static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6951. {
  6952. char buf[96];
  6953. dd_dev_info(dd, "LCB Error: %s\n",
  6954. lcb_err_string(buf, sizeof(buf), reg));
  6955. }
  6956. /*
  6957. * CCE block DC interrupt. Source is < 8.
  6958. */
  6959. static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
  6960. {
  6961. const struct err_reg_info *eri = &dc_errs[source];
  6962. if (eri->handler) {
  6963. interrupt_clear_down(dd, 0, eri);
  6964. } else if (source == 3 /* dc_lbm_int */) {
  6965. /*
  6966. * This indicates that a parity error has occurred on the
  6967. * address/control lines presented to the LBM. The error
  6968. * is a single pulse, there is no associated error flag,
  6969. * and it is non-maskable. This is because if a parity
  6970. * error occurs on the request the request is dropped.
  6971. * This should never occur, but it is nice to know if it
  6972. * ever does.
  6973. */
  6974. dd_dev_err(dd, "Parity error in DC LBM block\n");
  6975. } else {
  6976. dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
  6977. }
  6978. }
  6979. /*
  6980. * TX block send credit interrupt. Source is < 160.
  6981. */
  6982. static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
  6983. {
  6984. sc_group_release_update(dd, source);
  6985. }
  6986. /*
  6987. * TX block SDMA interrupt. Source is < 48.
  6988. *
  6989. * SDMA interrupts are grouped by type:
  6990. *
  6991. * 0 - N-1 = SDma
  6992. * N - 2N-1 = SDmaProgress
  6993. * 2N - 3N-1 = SDmaIdle
  6994. */
  6995. static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
  6996. {
  6997. /* what interrupt */
  6998. unsigned int what = source / TXE_NUM_SDMA_ENGINES;
  6999. /* which engine */
  7000. unsigned int which = source % TXE_NUM_SDMA_ENGINES;
  7001. #ifdef CONFIG_SDMA_VERBOSITY
  7002. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
  7003. slashstrip(__FILE__), __LINE__, __func__);
  7004. sdma_dumpstate(&dd->per_sdma[which]);
  7005. #endif
  7006. if (likely(what < 3 && which < dd->num_sdma)) {
  7007. sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
  7008. } else {
  7009. /* should not happen */
  7010. dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
  7011. }
  7012. }
  7013. /*
  7014. * RX block receive available interrupt. Source is < 160.
  7015. */
  7016. static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
  7017. {
  7018. struct hfi1_ctxtdata *rcd;
  7019. char *err_detail;
  7020. if (likely(source < dd->num_rcv_contexts)) {
  7021. rcd = dd->rcd[source];
  7022. if (rcd) {
  7023. if (source < dd->first_user_ctxt)
  7024. rcd->do_interrupt(rcd, 0);
  7025. else
  7026. handle_user_interrupt(rcd);
  7027. return; /* OK */
  7028. }
  7029. /* received an interrupt, but no rcd */
  7030. err_detail = "dataless";
  7031. } else {
  7032. /* received an interrupt, but are not using that context */
  7033. err_detail = "out of range";
  7034. }
  7035. dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
  7036. err_detail, source);
  7037. }
  7038. /*
  7039. * RX block receive urgent interrupt. Source is < 160.
  7040. */
  7041. static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
  7042. {
  7043. struct hfi1_ctxtdata *rcd;
  7044. char *err_detail;
  7045. if (likely(source < dd->num_rcv_contexts)) {
  7046. rcd = dd->rcd[source];
  7047. if (rcd) {
  7048. /* only pay attention to user urgent interrupts */
  7049. if (source >= dd->first_user_ctxt)
  7050. handle_user_interrupt(rcd);
  7051. return; /* OK */
  7052. }
  7053. /* received an interrupt, but no rcd */
  7054. err_detail = "dataless";
  7055. } else {
  7056. /* received an interrupt, but are not using that context */
  7057. err_detail = "out of range";
  7058. }
  7059. dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
  7060. err_detail, source);
  7061. }
  7062. /*
  7063. * Reserved range interrupt. Should not be called in normal operation.
  7064. */
  7065. static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
  7066. {
  7067. char name[64];
  7068. dd_dev_err(dd, "unexpected %s interrupt\n",
  7069. is_reserved_name(name, sizeof(name), source));
  7070. }
  7071. static const struct is_table is_table[] = {
7072. /*
7073. * Each entry:
7074. * { start, end, name function, interrupt function }
7075. */
  7076. { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
  7077. is_misc_err_name, is_misc_err_int },
  7078. { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
  7079. is_sdma_eng_err_name, is_sdma_eng_err_int },
  7080. { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
  7081. is_sendctxt_err_name, is_sendctxt_err_int },
  7082. { IS_SDMA_START, IS_SDMA_END,
  7083. is_sdma_eng_name, is_sdma_eng_int },
  7084. { IS_VARIOUS_START, IS_VARIOUS_END,
  7085. is_various_name, is_various_int },
  7086. { IS_DC_START, IS_DC_END,
  7087. is_dc_name, is_dc_int },
  7088. { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
  7089. is_rcv_avail_name, is_rcv_avail_int },
  7090. { IS_RCVURGENT_START, IS_RCVURGENT_END,
  7091. is_rcv_urgent_name, is_rcv_urgent_int },
  7092. { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
  7093. is_send_credit_name, is_send_credit_int},
  7094. { IS_RESERVED_START, IS_RESERVED_END,
  7095. is_reserved_name, is_reserved_int},
  7096. };
  7097. /*
  7098. * Interrupt source interrupt - called when the given source has an interrupt.
  7099. * Source is a bit index into an array of 64-bit integers.
  7100. */
  7101. static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
  7102. {
  7103. const struct is_table *entry;
  7104. /* avoids a double compare by walking the table in-order */
  7105. for (entry = &is_table[0]; entry->is_name; entry++) {
  7106. if (source < entry->end) {
  7107. trace_hfi1_interrupt(dd, entry, source);
  7108. entry->is_int(dd, source - entry->start);
  7109. return;
  7110. }
  7111. }
  7112. /* fell off the end */
  7113. dd_dev_err(dd, "invalid interrupt source %u\n", source);
  7114. }
  7115. /*
  7116. * General interrupt handler. This is able to correctly handle
  7117. * all interrupts in case INTx is used.
  7118. */
  7119. static irqreturn_t general_interrupt(int irq, void *data)
  7120. {
  7121. struct hfi1_devdata *dd = data;
  7122. u64 regs[CCE_NUM_INT_CSRS];
  7123. u32 bit;
  7124. int i;
  7125. this_cpu_inc(*dd->int_counter);
  7126. /* phase 1: scan and clear all handled interrupts */
  7127. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  7128. if (dd->gi_mask[i] == 0) {
  7129. regs[i] = 0; /* used later */
  7130. continue;
  7131. }
  7132. regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
  7133. dd->gi_mask[i];
  7134. /* only clear if anything is set */
  7135. if (regs[i])
  7136. write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
  7137. }
  7138. /* phase 2: call the appropriate handler */
  7139. for_each_set_bit(bit, (unsigned long *)&regs[0],
  7140. CCE_NUM_INT_CSRS * 64) {
  7141. is_interrupt(dd, bit);
  7142. }
  7143. return IRQ_HANDLED;
  7144. }
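/*
 * Per-engine SDMA interrupt handler.  Read the SDMA slice of
 * CCE_INT_STATUS, clear the bits owned by this engine, and hand them to
 * sdma_engine_interrupt().
 */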
  7145. static irqreturn_t sdma_interrupt(int irq, void *data)
  7146. {
  7147. struct sdma_engine *sde = data;
  7148. struct hfi1_devdata *dd = sde->dd;
  7149. u64 status;
  7150. #ifdef CONFIG_SDMA_VERBOSITY
  7151. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  7152. slashstrip(__FILE__), __LINE__, __func__);
  7153. sdma_dumpstate(sde);
  7154. #endif
  7155. this_cpu_inc(*dd->int_counter);
  7156. /* This read_csr is really bad in the hot path */
  7157. status = read_csr(dd,
  7158. CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
  7159. & sde->imask;
  7160. if (likely(status)) {
  7161. /* clear the interrupt(s) */
  7162. write_csr(dd,
  7163. CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
  7164. status);
  7165. /* handle the interrupt(s) */
  7166. sdma_engine_interrupt(sde, status);
  7167. } else
  7168. dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
  7169. sde->this_idx);
  7170. return IRQ_HANDLED;
  7171. }
  7172. /*
  7173. * Clear the receive interrupt. Use a read of the interrupt clear CSR
7174. * to ensure that the write completed. This does NOT guarantee that
  7175. * queued DMA writes to memory from the chip are pushed.
  7176. */
  7177. static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
  7178. {
  7179. struct hfi1_devdata *dd = rcd->dd;
  7180. u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
  7181. mmiowb(); /* make sure everything before is written */
  7182. write_csr(dd, addr, rcd->imask);
  7183. /* force the above write on the chip and get a value back */
  7184. (void)read_csr(dd, addr);
  7185. }
  7186. /* force the receive interrupt */
  7187. void force_recv_intr(struct hfi1_ctxtdata *rcd)
  7188. {
  7189. write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
  7190. }
  7191. /*
  7192. * Return non-zero if a packet is present.
  7193. *
  7194. * This routine is called when rechecking for packets after the RcvAvail
  7195. * interrupt has been cleared down. First, do a quick check of memory for
  7196. * a packet present. If not found, use an expensive CSR read of the context
  7197. * tail to determine the actual tail. The CSR read is necessary because there
  7198. * is no method to push pending DMAs to memory other than an interrupt and we
  7199. * are trying to determine if we need to force an interrupt.
  7200. */
  7201. static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
  7202. {
  7203. u32 tail;
  7204. int present;
  7205. if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
  7206. present = (rcd->seq_cnt ==
  7207. rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
  7208. else /* is RDMA rtail */
  7209. present = (rcd->head != get_rcvhdrtail(rcd));
  7210. if (present)
  7211. return 1;
7212. /* fall back to a CSR read, correct independent of DMA_RTAIL */
  7213. tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
  7214. return rcd->head != tail;
  7215. }
  7216. /*
  7217. * Receive packet IRQ handler. This routine expects to be on its own IRQ.
  7218. * This routine will try to handle packets immediately (latency), but if
7219. * it finds too many, it will invoke the thread handler (bandwidth). The
  7220. * chip receive interrupt is *not* cleared down until this or the thread (if
  7221. * invoked) is finished. The intent is to avoid extra interrupts while we
  7222. * are processing packets anyway.
  7223. */
  7224. static irqreturn_t receive_context_interrupt(int irq, void *data)
  7225. {
  7226. struct hfi1_ctxtdata *rcd = data;
  7227. struct hfi1_devdata *dd = rcd->dd;
  7228. int disposition;
  7229. int present;
  7230. trace_hfi1_receive_interrupt(dd, rcd->ctxt);
  7231. this_cpu_inc(*dd->int_counter);
  7232. aspm_ctx_disable(rcd);
  7233. /* receive interrupt remains blocked while processing packets */
  7234. disposition = rcd->do_interrupt(rcd, 0);
  7235. /*
  7236. * Too many packets were seen while processing packets in this
  7237. * IRQ handler. Invoke the handler thread. The receive interrupt
  7238. * remains blocked.
  7239. */
  7240. if (disposition == RCV_PKT_LIMIT)
  7241. return IRQ_WAKE_THREAD;
  7242. /*
  7243. * The packet processor detected no more packets. Clear the receive
7244. * interrupt and recheck for a packet that may have arrived
  7245. * after the previous check and interrupt clear. If a packet arrived,
  7246. * force another interrupt.
  7247. */
  7248. clear_recv_intr(rcd);
  7249. present = check_packet_present(rcd);
  7250. if (present)
  7251. force_recv_intr(rcd);
  7252. return IRQ_HANDLED;
  7253. }
  7254. /*
  7255. * Receive packet thread handler. This expects to be invoked with the
  7256. * receive interrupt still blocked.
  7257. */
  7258. static irqreturn_t receive_context_thread(int irq, void *data)
  7259. {
  7260. struct hfi1_ctxtdata *rcd = data;
  7261. int present;
  7262. /* receive interrupt is still blocked from the IRQ handler */
  7263. (void)rcd->do_interrupt(rcd, 1);
  7264. /*
  7265. * The packet processor will only return if it detected no more
  7266. * packets. Hold IRQs here so we can safely clear the interrupt and
  7267. * recheck for a packet that may have arrived after the previous
  7268. * check and the interrupt clear. If a packet arrived, force another
  7269. * interrupt.
  7270. */
  7271. local_irq_disable();
  7272. clear_recv_intr(rcd);
  7273. present = check_packet_present(rcd);
  7274. if (present)
  7275. force_recv_intr(rcd);
  7276. local_irq_enable();
  7277. return IRQ_HANDLED;
  7278. }
  7279. /* ========================================================================= */
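/*
 * The two helpers below report link state: the 8051's current physical
 * port state (DC_DC8051_STS_CUR_STATE) and the DCC logical link state
 * (DCC_CFG_PORT_CONFIG).
 */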
  7280. u32 read_physical_state(struct hfi1_devdata *dd)
  7281. {
  7282. u64 reg;
  7283. reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
  7284. return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
  7285. & DC_DC8051_STS_CUR_STATE_PORT_MASK;
  7286. }
  7287. u32 read_logical_state(struct hfi1_devdata *dd)
  7288. {
  7289. u64 reg;
  7290. reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
  7291. return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
  7292. & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
  7293. }
  7294. static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
  7295. {
  7296. u64 reg;
  7297. reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
  7298. /* clear current state, set new state */
  7299. reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
  7300. reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
  7301. write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
  7302. }
  7303. /*
7304. * Use the 8051 to read an LCB CSR.
  7305. */
  7306. static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
  7307. {
  7308. u32 regno;
  7309. int ret;
  7310. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  7311. if (acquire_lcb_access(dd, 0) == 0) {
  7312. *data = read_csr(dd, addr);
  7313. release_lcb_access(dd, 0);
  7314. return 0;
  7315. }
  7316. return -EBUSY;
  7317. }
  7318. /* register is an index of LCB registers: (offset - base) / 8 */
  7319. regno = (addr - DC_LCB_CFG_RUN) >> 3;
  7320. ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
  7321. if (ret != HCMD_SUCCESS)
  7322. return -EBUSY;
  7323. return 0;
  7324. }
  7325. /*
  7326. * Read an LCB CSR. Access may not be in host control, so check.
  7327. * Return 0 on success, -EBUSY on failure.
  7328. */
  7329. int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
  7330. {
  7331. struct hfi1_pportdata *ppd = dd->pport;
  7332. /* if up, go through the 8051 for the value */
  7333. if (ppd->host_link_state & HLS_UP)
  7334. return read_lcb_via_8051(dd, addr, data);
  7335. /* if going up or down, no access */
  7336. if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
  7337. return -EBUSY;
  7338. /* otherwise, host has access */
  7339. *data = read_csr(dd, addr);
  7340. return 0;
  7341. }
  7342. /*
7343. * Use the 8051 to write an LCB CSR.
  7344. */
  7345. static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
  7346. {
  7347. u32 regno;
  7348. int ret;
  7349. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
  7350. (dd->dc8051_ver < dc8051_ver(0, 20))) {
  7351. if (acquire_lcb_access(dd, 0) == 0) {
  7352. write_csr(dd, addr, data);
  7353. release_lcb_access(dd, 0);
  7354. return 0;
  7355. }
  7356. return -EBUSY;
  7357. }
  7358. /* register is an index of LCB registers: (offset - base) / 8 */
  7359. regno = (addr - DC_LCB_CFG_RUN) >> 3;
  7360. ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
  7361. if (ret != HCMD_SUCCESS)
  7362. return -EBUSY;
  7363. return 0;
  7364. }
  7365. /*
  7366. * Write an LCB CSR. Access may not be in host control, so check.
  7367. * Return 0 on success, -EBUSY on failure.
  7368. */
  7369. int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
  7370. {
  7371. struct hfi1_pportdata *ppd = dd->pport;
  7372. /* if up, go through the 8051 for the value */
  7373. if (ppd->host_link_state & HLS_UP)
  7374. return write_lcb_via_8051(dd, addr, data);
  7375. /* if going up or down, no access */
  7376. if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
  7377. return -EBUSY;
  7378. /* otherwise, host has access */
  7379. write_csr(dd, addr, data);
  7380. return 0;
  7381. }
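/*
 * Illustrative sketch only (not called anywhere): how a caller would use
 * the accessors above without caring who currently owns the LCB.  The
 * CSR chosen is simply one referenced elsewhere in this file.
 *
 *	u64 val;
 *
 *	if (read_lcb_csr(dd, DC_LCB_CFG_RUN, &val))
 *		dd_dev_info(dd, "LCB access busy, try again later\n");
 *	else
 *		dd_dev_info(dd, "LCB run: 0x%llx\n", val);
 */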
  7382. /*
  7383. * Returns:
  7384. * < 0 = Linux error, not able to get access
  7385. * > 0 = 8051 command RETURN_CODE
  7386. */
  7387. static int do_8051_command(
  7388. struct hfi1_devdata *dd,
  7389. u32 type,
  7390. u64 in_data,
  7391. u64 *out_data)
  7392. {
  7393. u64 reg, completed;
  7394. int return_code;
  7395. unsigned long flags;
  7396. unsigned long timeout;
  7397. hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
  7398. /*
  7399. * Alternative to holding the lock for a long time:
  7400. * - keep busy wait - have other users bounce off
  7401. */
  7402. spin_lock_irqsave(&dd->dc8051_lock, flags);
  7403. /* We can't send any commands to the 8051 if it's in reset */
  7404. if (dd->dc_shutdown) {
  7405. return_code = -ENODEV;
  7406. goto fail;
  7407. }
  7408. /*
  7409. * If an 8051 host command timed out previously, then the 8051 is
  7410. * stuck.
  7411. *
  7412. * On first timeout, attempt to reset and restart the entire DC
  7413. * block (including 8051). (Is this too big of a hammer?)
  7414. *
  7415. * If the 8051 times out a second time, the reset did not bring it
  7416. * back to healthy life. In that case, fail any subsequent commands.
  7417. */
  7418. if (dd->dc8051_timed_out) {
  7419. if (dd->dc8051_timed_out > 1) {
  7420. dd_dev_err(dd,
  7421. "Previous 8051 host command timed out, skipping command %u\n",
  7422. type);
  7423. return_code = -ENXIO;
  7424. goto fail;
  7425. }
  7426. spin_unlock_irqrestore(&dd->dc8051_lock, flags);
  7427. dc_shutdown(dd);
  7428. dc_start(dd);
  7429. spin_lock_irqsave(&dd->dc8051_lock, flags);
  7430. }
  7431. /*
  7432. * If there is no timeout, then the 8051 command interface is
  7433. * waiting for a command.
  7434. */
  7435. /*
7436. * When writing an LCB CSR, out_data contains the full value
7437. * to be written, while in_data contains the relative LCB
7438. * address in 7:0. Do the work here, rather than in the caller,
7439. * of distributing the write data to where it needs to go:
  7440. *
  7441. * Write data
  7442. * 39:00 -> in_data[47:8]
  7443. * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
  7444. * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
  7445. */
  7446. if (type == HCMD_WRITE_LCB_CSR) {
  7447. in_data |= ((*out_data) & 0xffffffffffull) << 8;
  7448. reg = ((((*out_data) >> 40) & 0xff) <<
  7449. DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
  7450. | ((((*out_data) >> 48) & 0xffff) <<
  7451. DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
  7452. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
  7453. }
  7454. /*
  7455. * Do two writes: the first to stabilize the type and req_data, the
  7456. * second to activate.
  7457. */
  7458. reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
  7459. << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
  7460. | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
  7461. << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
  7462. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
  7463. reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
  7464. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
  7465. /* wait for completion, alternate: interrupt */
  7466. timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
  7467. while (1) {
  7468. reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
  7469. completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
  7470. if (completed)
  7471. break;
  7472. if (time_after(jiffies, timeout)) {
  7473. dd->dc8051_timed_out++;
  7474. dd_dev_err(dd, "8051 host command %u timeout\n", type);
  7475. if (out_data)
  7476. *out_data = 0;
  7477. return_code = -ETIMEDOUT;
  7478. goto fail;
  7479. }
  7480. udelay(2);
  7481. }
  7482. if (out_data) {
  7483. *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
  7484. & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
  7485. if (type == HCMD_READ_LCB_CSR) {
  7486. /* top 16 bits are in a different register */
  7487. *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
  7488. & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
  7489. << (48
  7490. - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
  7491. }
  7492. }
  7493. return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
  7494. & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
  7495. dd->dc8051_timed_out = 0;
  7496. /*
  7497. * Clear command for next user.
  7498. */
  7499. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
  7500. fail:
  7501. spin_unlock_irqrestore(&dd->dc8051_lock, flags);
  7502. return return_code;
  7503. }
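/*
 * Ask the 8051 to move the physical link to 'state'.  Returns an 8051
 * RETURN_CODE, or a negative errno if the command could not be issued.
 */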
  7504. static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
  7505. {
  7506. return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
  7507. }
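/*
 * Write one 32-bit 8051 configuration "register".  The field id, lane id,
 * and data are packed into a single LOAD_CONFIG_DATA host command.
 * Returns the 8051 RETURN_CODE, or a negative errno.
 */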
  7508. int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
  7509. u8 lane_id, u32 config_data)
  7510. {
  7511. u64 data;
  7512. int ret;
  7513. data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
  7514. | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
  7515. | (u64)config_data << LOAD_DATA_DATA_SHIFT;
  7516. ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
  7517. if (ret != HCMD_SUCCESS) {
  7518. dd_dev_err(dd,
  7519. "load 8051 config: field id %d, lane %d, err %d\n",
  7520. (int)field_id, (int)lane_id, ret);
  7521. }
  7522. return ret;
  7523. }
  7524. /*
  7525. * Read the 8051 firmware "registers". Use the RAM directly. Always
  7526. * set the result, even on error.
  7527. * Return 0 on success, -errno on failure
  7528. */
  7529. int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
  7530. u32 *result)
  7531. {
  7532. u64 big_data;
  7533. u32 addr;
  7534. int ret;
  7535. /* address start depends on the lane_id */
  7536. if (lane_id < 4)
  7537. addr = (4 * NUM_GENERAL_FIELDS)
  7538. + (lane_id * 4 * NUM_LANE_FIELDS);
  7539. else
  7540. addr = 0;
  7541. addr += field_id * 4;
  7542. /* read is in 8-byte chunks, hardware will truncate the address down */
  7543. ret = read_8051_data(dd, addr, 8, &big_data);
  7544. if (ret == 0) {
  7545. /* extract the 4 bytes we want */
  7546. if (addr & 0x4)
  7547. *result = (u32)(big_data >> 32);
  7548. else
  7549. *result = (u32)big_data;
  7550. } else {
  7551. *result = 0;
  7552. dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
  7553. __func__, lane_id, field_id);
  7554. }
  7555. return ret;
  7556. }
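/*
 * The helpers below each pack or unpack one 32-bit configuration frame
 * for the 8051 using the per-field SHIFT/MASK pairs, then pass it through
 * load_8051_config() or read_8051_config().
 */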
  7557. static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
  7558. u8 continuous)
  7559. {
  7560. u32 frame;
  7561. frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
  7562. | power_management << POWER_MANAGEMENT_SHIFT;
  7563. return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
  7564. GENERAL_CONFIG, frame);
  7565. }
  7566. static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
  7567. u16 vl15buf, u8 crc_sizes)
  7568. {
  7569. u32 frame;
  7570. frame = (u32)vau << VAU_SHIFT
  7571. | (u32)z << Z_SHIFT
  7572. | (u32)vcu << VCU_SHIFT
  7573. | (u32)vl15buf << VL15BUF_SHIFT
  7574. | (u32)crc_sizes << CRC_SIZES_SHIFT;
  7575. return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
  7576. GENERAL_CONFIG, frame);
  7577. }
  7578. static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
  7579. u8 *flag_bits, u16 *link_widths)
  7580. {
  7581. u32 frame;
  7582. read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
  7583. &frame);
  7584. *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
  7585. *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
  7586. *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
  7587. }
  7588. static int write_vc_local_link_width(struct hfi1_devdata *dd,
  7589. u8 misc_bits,
  7590. u8 flag_bits,
  7591. u16 link_widths)
  7592. {
  7593. u32 frame;
  7594. frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
  7595. | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
  7596. | (u32)link_widths << LINK_WIDTH_SHIFT;
  7597. return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
  7598. frame);
  7599. }
  7600. static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
  7601. u8 device_rev)
  7602. {
  7603. u32 frame;
  7604. frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
  7605. | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
  7606. return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
  7607. }
  7608. static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
  7609. u8 *device_rev)
  7610. {
  7611. u32 frame;
  7612. read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
  7613. *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
  7614. *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
  7615. & REMOTE_DEVICE_REV_MASK;
  7616. }
  7617. void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
  7618. {
  7619. u32 frame;
  7620. read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
  7621. *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
  7622. *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
  7623. }
  7624. static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
  7625. u8 *continuous)
  7626. {
  7627. u32 frame;
  7628. read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
  7629. *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
  7630. & POWER_MANAGEMENT_MASK;
  7631. *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
  7632. & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
  7633. }
  7634. static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
  7635. u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
  7636. {
  7637. u32 frame;
  7638. read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
  7639. *vau = (frame >> VAU_SHIFT) & VAU_MASK;
  7640. *z = (frame >> Z_SHIFT) & Z_MASK;
  7641. *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
  7642. *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
  7643. *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
  7644. }
  7645. static void read_vc_remote_link_width(struct hfi1_devdata *dd,
  7646. u8 *remote_tx_rate,
  7647. u16 *link_widths)
  7648. {
  7649. u32 frame;
  7650. read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
  7651. &frame);
  7652. *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
  7653. & REMOTE_TX_RATE_MASK;
  7654. *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
  7655. }
  7656. static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
  7657. {
  7658. u32 frame;
  7659. read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
  7660. *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
  7661. }
  7662. static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
  7663. {
  7664. u32 frame;
  7665. read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
  7666. *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
  7667. }
  7668. static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
  7669. {
  7670. read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
  7671. }
  7672. static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
  7673. {
  7674. read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
  7675. }
  7676. void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
  7677. {
  7678. u32 frame;
  7679. int ret;
  7680. *link_quality = 0;
  7681. if (dd->pport->host_link_state & HLS_UP) {
  7682. ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
  7683. &frame);
  7684. if (ret == 0)
  7685. *link_quality = (frame >> LINK_QUALITY_SHIFT)
  7686. & LINK_QUALITY_MASK;
  7687. }
  7688. }
  7689. static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
  7690. {
  7691. u32 frame;
  7692. read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
  7693. *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
  7694. }
  7695. static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
  7696. {
  7697. u32 frame;
  7698. read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
  7699. *ldr = (frame & 0xff);
  7700. }
  7701. static int read_tx_settings(struct hfi1_devdata *dd,
  7702. u8 *enable_lane_tx,
  7703. u8 *tx_polarity_inversion,
  7704. u8 *rx_polarity_inversion,
  7705. u8 *max_rate)
  7706. {
  7707. u32 frame;
  7708. int ret;
  7709. ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
  7710. *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
  7711. & ENABLE_LANE_TX_MASK;
  7712. *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
  7713. & TX_POLARITY_INVERSION_MASK;
  7714. *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
  7715. & RX_POLARITY_INVERSION_MASK;
  7716. *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
  7717. return ret;
  7718. }
  7719. static int write_tx_settings(struct hfi1_devdata *dd,
  7720. u8 enable_lane_tx,
  7721. u8 tx_polarity_inversion,
  7722. u8 rx_polarity_inversion,
  7723. u8 max_rate)
  7724. {
  7725. u32 frame;
  7726. /* no need to mask, all variable sizes match field widths */
  7727. frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
  7728. | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
  7729. | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
  7730. | max_rate << MAX_RATE_SHIFT;
  7731. return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
  7732. }
  7733. /*
  7734. * Read an idle LCB message.
  7735. *
  7736. * Returns 0 on success, -EINVAL on error
  7737. */
  7738. static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
  7739. {
  7740. int ret;
  7741. ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
  7742. if (ret != HCMD_SUCCESS) {
  7743. dd_dev_err(dd, "read idle message: type %d, err %d\n",
  7744. (u32)type, ret);
  7745. return -EINVAL;
  7746. }
  7747. dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
  7748. /* return only the payload as we already know the type */
  7749. *data_out >>= IDLE_PAYLOAD_SHIFT;
  7750. return 0;
  7751. }
  7752. /*
  7753. * Read an idle SMA message. To be done in response to a notification from
  7754. * the 8051.
  7755. *
  7756. * Returns 0 on success, -EINVAL on error
  7757. */
  7758. static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
  7759. {
  7760. return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
  7761. data);
  7762. }
  7763. /*
  7764. * Send an idle LCB message.
  7765. *
  7766. * Returns 0 on success, -EINVAL on error
  7767. */
  7768. static int send_idle_message(struct hfi1_devdata *dd, u64 data)
  7769. {
  7770. int ret;
  7771. dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
  7772. ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
  7773. if (ret != HCMD_SUCCESS) {
  7774. dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
  7775. data, ret);
  7776. return -EINVAL;
  7777. }
  7778. return 0;
  7779. }
  7780. /*
  7781. * Send an idle SMA message.
  7782. *
  7783. * Returns 0 on success, -EINVAL on error
  7784. */
  7785. int send_idle_sma(struct hfi1_devdata *dd, u64 message)
  7786. {
  7787. u64 data;
  7788. data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
  7789. ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
  7790. return send_idle_message(dd, data);
  7791. }
  7792. /*
  7793. * Initialize the LCB then do a quick link up. This may or may not be
  7794. * in loopback.
  7795. *
  7796. * return 0 on success, -errno on error
  7797. */
  7798. static int do_quick_linkup(struct hfi1_devdata *dd)
  7799. {
  7800. u64 reg;
  7801. unsigned long timeout;
  7802. int ret;
  7803. lcb_shutdown(dd, 0);
  7804. if (loopback) {
  7805. /* LCB_CFG_LOOPBACK.VAL = 2 */
  7806. /* LCB_CFG_LANE_WIDTH.VAL = 0 */
  7807. write_csr(dd, DC_LCB_CFG_LOOPBACK,
  7808. IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
  7809. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
  7810. }
  7811. /* start the LCBs */
  7812. /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
  7813. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  7814. /* simulator only loopback steps */
  7815. if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  7816. /* LCB_CFG_RUN.EN = 1 */
  7817. write_csr(dd, DC_LCB_CFG_RUN,
  7818. 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
  7819. /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
  7820. timeout = jiffies + msecs_to_jiffies(10);
  7821. while (1) {
  7822. reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
  7823. if (reg)
  7824. break;
  7825. if (time_after(jiffies, timeout)) {
  7826. dd_dev_err(dd,
  7827. "timeout waiting for LINK_TRANSFER_ACTIVE\n");
  7828. return -ETIMEDOUT;
  7829. }
  7830. udelay(2);
  7831. }
  7832. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
  7833. 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
  7834. }
  7835. if (!loopback) {
  7836. /*
  7837. * When doing quick linkup and not in loopback, both
  7838. * sides must be done with LCB set-up before either
  7839. * starts the quick linkup. Put a delay here so that
  7840. * both sides can be started and have a chance to be
  7841. * done with LCB set up before resuming.
  7842. */
  7843. dd_dev_err(dd,
  7844. "Pausing for peer to be finished with LCB set up\n");
  7845. msleep(5000);
  7846. dd_dev_err(dd, "Continuing with quick linkup\n");
  7847. }
  7848. write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
  7849. set_8051_lcb_access(dd);
  7850. /*
  7851. * State "quick" LinkUp request sets the physical link state to
  7852. * LinkUp without a verify capability sequence.
  7853. * This state is in simulator v37 and later.
  7854. */
  7855. ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
  7856. if (ret != HCMD_SUCCESS) {
  7857. dd_dev_err(dd,
  7858. "%s: set physical link state to quick LinkUp failed with return %d\n",
  7859. __func__, ret);
  7860. set_host_lcb_access(dd);
  7861. write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
  7862. if (ret >= 0)
  7863. ret = -EINVAL;
  7864. return ret;
  7865. }
  7866. return 0; /* success */
  7867. }
  7868. /*
  7869. * Set the SerDes to internal loopback mode.
  7870. * Returns 0 on success, -errno on error.
  7871. */
  7872. static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
  7873. {
  7874. int ret;
  7875. ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
  7876. if (ret == HCMD_SUCCESS)
  7877. return 0;
  7878. dd_dev_err(dd,
  7879. "Set physical link state to SerDes Loopback failed with return %d\n",
  7880. ret);
  7881. if (ret >= 0)
  7882. ret = -EINVAL;
  7883. return ret;
  7884. }
  7885. /*
  7886. * Do all special steps to set up loopback.
  7887. */
  7888. static int init_loopback(struct hfi1_devdata *dd)
  7889. {
  7890. dd_dev_info(dd, "Entering loopback mode\n");
  7891. /* all loopbacks should disable self GUID check */
  7892. write_csr(dd, DC_DC8051_CFG_MODE,
  7893. (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
  7894. /*
  7895. * The simulator has only one loopback option - LCB. Switch
  7896. * to that option, which includes quick link up.
  7897. *
  7898. * Accept all valid loopback values.
  7899. */
  7900. if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
  7901. (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
  7902. loopback == LOOPBACK_CABLE)) {
  7903. loopback = LOOPBACK_LCB;
  7904. quick_linkup = 1;
  7905. return 0;
  7906. }
  7907. /* handle serdes loopback */
  7908. if (loopback == LOOPBACK_SERDES) {
7909. /* internal serdes loopback needs quick linkup on RTL */
  7910. if (dd->icode == ICODE_RTL_SILICON)
  7911. quick_linkup = 1;
  7912. return set_serdes_loopback_mode(dd);
  7913. }
  7914. /* LCB loopback - handled at poll time */
  7915. if (loopback == LOOPBACK_LCB) {
  7916. quick_linkup = 1; /* LCB is always quick linkup */
  7917. /* not supported in emulation due to emulation RTL changes */
  7918. if (dd->icode == ICODE_FPGA_EMULATION) {
  7919. dd_dev_err(dd,
  7920. "LCB loopback not supported in emulation\n");
  7921. return -EINVAL;
  7922. }
  7923. return 0;
  7924. }
  7925. /* external cable loopback requires no extra steps */
  7926. if (loopback == LOOPBACK_CABLE)
  7927. return 0;
  7928. dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
  7929. return -EINVAL;
  7930. }
  7931. /*
  7932. * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
  7933. * used in the Verify Capability link width attribute.
  7934. */
  7935. static u16 opa_to_vc_link_widths(u16 opa_widths)
  7936. {
  7937. int i;
  7938. u16 result = 0;
  7939. static const struct link_bits {
  7940. u16 from;
  7941. u16 to;
  7942. } opa_link_xlate[] = {
  7943. { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
  7944. { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
  7945. { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
  7946. { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
  7947. };
  7948. for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
  7949. if (opa_widths & opa_link_xlate[i].from)
  7950. result |= opa_link_xlate[i].to;
  7951. }
  7952. return result;
  7953. }
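/*
 * Worked example from the table above: an enabled width of
 * (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X) translates to 0x9 in the
 * Verify Capability encoding (bit 0 for 1X, bit 3 for 4X).
 */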
  7954. /*
  7955. * Set link attributes before moving to polling.
  7956. */
  7957. static int set_local_link_attributes(struct hfi1_pportdata *ppd)
  7958. {
  7959. struct hfi1_devdata *dd = ppd->dd;
  7960. u8 enable_lane_tx;
  7961. u8 tx_polarity_inversion;
  7962. u8 rx_polarity_inversion;
  7963. int ret;
  7964. /* reset our fabric serdes to clear any lingering problems */
  7965. fabric_serdes_reset(dd);
  7966. /* set the local tx rate - need to read-modify-write */
  7967. ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
  7968. &rx_polarity_inversion, &ppd->local_tx_rate);
  7969. if (ret)
  7970. goto set_local_link_attributes_fail;
  7971. if (dd->dc8051_ver < dc8051_ver(0, 20)) {
  7972. /* set the tx rate to the fastest enabled */
  7973. if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
  7974. ppd->local_tx_rate = 1;
  7975. else
  7976. ppd->local_tx_rate = 0;
  7977. } else {
  7978. /* set the tx rate to all enabled */
  7979. ppd->local_tx_rate = 0;
  7980. if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
  7981. ppd->local_tx_rate |= 2;
  7982. if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
  7983. ppd->local_tx_rate |= 1;
  7984. }
  7985. enable_lane_tx = 0xF; /* enable all four lanes */
  7986. ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
  7987. rx_polarity_inversion, ppd->local_tx_rate);
  7988. if (ret != HCMD_SUCCESS)
  7989. goto set_local_link_attributes_fail;
  7990. /*
  7991. * DC supports continuous updates.
  7992. */
  7993. ret = write_vc_local_phy(dd,
  7994. 0 /* no power management */,
  7995. 1 /* continuous updates */);
  7996. if (ret != HCMD_SUCCESS)
  7997. goto set_local_link_attributes_fail;
  7998. /* z=1 in the next call: AU of 0 is not supported by the hardware */
  7999. ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
  8000. ppd->port_crc_mode_enabled);
  8001. if (ret != HCMD_SUCCESS)
  8002. goto set_local_link_attributes_fail;
  8003. ret = write_vc_local_link_width(dd, 0, 0,
  8004. opa_to_vc_link_widths(
  8005. ppd->link_width_enabled));
  8006. if (ret != HCMD_SUCCESS)
  8007. goto set_local_link_attributes_fail;
  8008. /* let peer know who we are */
  8009. ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
  8010. if (ret == HCMD_SUCCESS)
  8011. return 0;
  8012. set_local_link_attributes_fail:
  8013. dd_dev_err(dd,
  8014. "Failed to set local link attributes, return 0x%x\n",
  8015. ret);
  8016. return ret;
  8017. }
  8018. /*
  8019. * Call this to start the link.
  8020. * Do not do anything if the link is disabled.
  8021. * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
  8022. */
  8023. int start_link(struct hfi1_pportdata *ppd)
  8024. {
  8025. if (!ppd->link_enabled) {
  8026. dd_dev_info(ppd->dd,
  8027. "%s: stopping link start because link is disabled\n",
  8028. __func__);
  8029. return 0;
  8030. }
  8031. if (!ppd->driver_link_ready) {
  8032. dd_dev_info(ppd->dd,
  8033. "%s: stopping link start because driver is not ready\n",
  8034. __func__);
  8035. return 0;
  8036. }
  8037. /*
  8038. * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
  8039. * pkey table can be configured properly if the HFI unit is connected
  8040. * to switch port with MgmtAllowed=NO
  8041. */
  8042. clear_full_mgmt_pkey(ppd);
  8043. return set_link_state(ppd, HLS_DN_POLL);
  8044. }
static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/*
	 * Some QSFP cables have a quirk that asserts the IntN line as a side
	 * effect of power up on plug-in. We ignore this false positive
	 * interrupt until the module has finished powering up by waiting for
	 * a minimum timeout of the module inrush initialization time of
	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
	 * module have stabilized.
	 */
	msleep(500);

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N))
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}
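
/* Enable or disable the IntN contribution to the QSFP interrupt mask. */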
static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable) {
		/*
		 * Clear the status register to avoid an immediate interrupt
		 * when we re-enable the IntN pin
		 */
		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
			  QSFP_HFI0_INT_N);
		mask |= (u64)QSFP_HFI0_INT_N;
	} else {
		mask &= ~(u64)QSFP_HFI0_INT_N;
	}
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
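
/*
 * Pulse ResetN low to reset the QSFP module, wait for it to initialize,
 * then re-enable the IntN interrupt so alarms and warnings are seen again.
 */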
void reset_qsfp(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask, qsfp_mask;

	/* Disable INT_N from triggering QSFP interrupts */
	set_qsfp_int_n(ppd, 0);

	/* Reset the QSFP */
	mask = (u64)QSFP_HFI0_RESET_N;
	qsfp_mask = read_csr(dd,
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	qsfp_mask |= mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	wait_for_qsfp_init(ppd);

	/*
	 * Allow INT_N to trigger the QSFP interrupt to watch
	 * for alarms and warnings
	 */
	set_qsfp_int_n(ppd, 1);
}
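
/*
 * Decode the QSFP interrupt status bytes (temperature, supply voltage,
 * RX power and TX bias/power) and log any alarm or warning bits found.
 */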
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
					u8 *qsfp_interrupt_status)
{
	struct hfi1_devdata *dd = ppd->dd;

	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
			    __func__);

	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
		dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
			    __func__);

	/*
	 * The remaining alarms/warnings don't matter if the link is down.
	 */
	if (ppd->host_link_state & HLS_DOWN)
		return 0;

	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
		dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
			    __func__);

	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
		dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
			    __func__);

	/* Byte 2 is vendor specific */

	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
			    __func__);

	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
			    __func__);

	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
			    __func__);

	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
			    __func__);

	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
			    __func__);

	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
			    __func__);

	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
			    __func__);

	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
			    __func__);

	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
		dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
			    __func__);

	/* Bytes 9-10 and 11-12 are reserved */
	/* Bytes 13-15 are vendor specific */

	return 0;
}

/* This routine will only be scheduled if the QSFP module present is asserted */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* Sanity check */
	if (!qsfp_mod_present(ppd))
		return;

	/*
	 * Turn DC back on after cable has been re-inserted. Up until
	 * now, the DC has been in reset to save power.
	 */
	dc_start(dd);

	if (qd->cache_refresh_required) {
		set_qsfp_int_n(ppd, 0);

		wait_for_qsfp_init(ppd);

		/*
		 * Allow INT_N to trigger the QSFP interrupt to watch
		 * for alarms and warnings
		 */
		set_qsfp_int_n(ppd, 1);

		tune_serdes(ppd);

		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				    "%s: Failed to read status of QSFP module\n",
				    __func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(
					ppd, qsfp_interrupt_status);
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}
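
/*
 * One-time QSFP interrupt setup for this HFI: mask off the QSFP interrupt
 * that belongs to the other HFI, clear stale status, and program the pin
 * inverts to account for the active-low IntN and ModPrstN signals.
 */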
static void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask, cce_int_mask;
	const int qsfp1_int_smask = QSFP1_INT % 64;
	const int qsfp2_int_smask = QSFP2_INT % 64;

	/*
	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
	 * the index of the appropriate CSR in the CCEIntMask CSR array
	 */
	cce_int_mask = read_csr(dd, CCE_INT_MASK +
				(8 * (QSFP1_INT / 64)));
	if (dd->hfi1_id) {
		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
			  cce_int_mask);
	} else {
		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
			  cce_int_mask);
	}

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* Clear current status to avoid spurious interrupts */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	set_qsfp_int_n(ppd, 0);

	/* Handle active low nature of INT_N and MODPRST_N pins */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);
}

/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* simulator does not correctly handle LCB cclk loopback, skip */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* the DC has been reset earlier in the driver load */

	/* set LCB for cclk loopback on the port */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
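
/*
 * Bring the physical link out of reset: assign the port GUID if needed,
 * do the one-time LCB init, set up loopback and QSFP state as appropriate,
 * tune the SerDes, and then start the link.
 */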
int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	guid = ppd->guid;
	if (!guid) {
		if (dd->base_guid)
			guid = dd->base_guid + ppd->port - 1;
		ppd->guid = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time init of the LCB */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	get_port_type(ppd);
	if (ppd->port_type == PORT_TYPE_QSFP) {
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
	}

	/*
	 * Tune the SerDes to a ballpark setting for
	 * optimal signal and bit error rate
	 * Needs to be done before starting the link
	 */
	tune_serdes(ppd);

	return start_link(ppd);
}
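
/* Quiesce the port for shutdown: take the link down, keep it down, and disable the port. */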
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down. First indicate that the
	 * driver no longer wants the link up (driver_link_ready), then
	 * make sure the link is not automatically restarted
	 * (link_enabled). Cancel any pending restart. And finally
	 * go offline.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;

	ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
			     OPA_LINKDOWN_REASON_SMA_DISABLED);
	set_link_state(ppd, HLS_DN_OFFLINE);

	/* disable the port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}

static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
		if (!ppd->ibport_data.rvp.rc_acks ||
		    !ppd->ibport_data.rvp.rc_delayed_comp ||
		    !ppd->ibport_data.rvp.rc_qacks)
			return -ENOMEM;
	}

	return 0;
}

static const char * const pt_names[] = {
	"expected",
	"eager",
	"invalid"
};

static const char *pt_name(u32 type)
{
	return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
}

/*
 * index is the index into the receive array
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;
	void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
			      (dd->kregbase + RCV_ARRAY));

	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID) {
		pa = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}

	hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
		  pt_name(type), index, pa, (unsigned long)order);

#define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
	writeq(reg, base + (index * 8));

	if (type == PT_EAGER)
		/*
		 * Eager entries are written one-by-one so we have to push them
		 * after we write the entry.
		 */
		flush_wc();

done:
	return;
}
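
/* Invalidate every eager and expected receive array entry owned by this context. */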
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 i;

	/* this could be optimized */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
	for (i = rcd->expected_base;
			i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}

struct hfi1_message_header *hfi1_get_msgheader(
	struct hfi1_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (struct hfi1_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}

static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};

static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}
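
/*
 * Return the current value of the given IB config item; items that are
 * not implemented return 0 and are only logged when PRINT_UNIMPL is set.
 */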
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active Link-width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current Link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = dd->link_default;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(
				dd,
				"%s: which %s: not implemented\n",
				__func__,
				ib_cfg_name(which));
		break;
	}

	return val;
}

/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device. This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all contexts will have the same value. Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}

/*
 * Set Send Length
 * @ppd - per port data
 *
 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
	u32 maxvlmtu = dd->vld[15].mtu;
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i, j;
	u32 thres;

	for (i = 0; i < ppd->vls_supported; i++) {
		if (dd->vld[i].mtu > maxvlmtu)
			maxvlmtu = dd->vld[i].mtu;
		if (i <= 3)
			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
		else
			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
	}
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);

	/* adjust kernel credit return thresholds based on new MTUs */
	/* all kernel receive contexts have the same hdrqentsize */
	for (i = 0; i < ppd->vls_supported; i++) {
		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						dd->rcd[0]->rcvhdrqentsize));
		for (j = 0; j < INIT_SC_PER_VL; j++)
			sc_set_cr_threshold(
					pio_select_send_context_vl(dd, j, i),
					thres);
	}
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,
					dd->vld[15].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->vld[15].sc, thres);

	/* Adjust maximum MTU for the port in DC */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}
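
/*
 * Program the port LID/LMC into the DC port config, every send context
 * SLID check register, and the SDMA engines.
 */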
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);

	if (dd->hfi1_snoop.mode_flag)
		dd_dev_info(dd, "Set lid/lmc while snooping");

	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
	c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

	/*
	 * Iterate over all the send contexts and set their SLID check
	 */
	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
	       (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

	for (i = 0; i < dd->chip_send_contexts; i++) {
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
	}

	/* Now we have to do the same thing for the sdma engines */
	sdma_update_lmc(dd, mask, ppd->lid);
}
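
/*
 * Poll the physical link state until it equals 'state' or 'msecs'
 * milliseconds elapse.  Returns 0 on match, -ETIMEDOUT otherwise.
 */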
static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
{
	unsigned long timeout;
	u32 curr_state;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		curr_state = read_physical_state(dd);
		if (curr_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
				   state, curr_state);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	return 0;
}

static const char *state_completed_string(u32 completed)
{
	static const char * const state_completed[] = {
		"EstablishComm",
		"OptimizeEQ",
		"VerifyCap"
	};

	if (completed < ARRAY_SIZE(state_completed))
		return state_completed[completed];

	return "unknown";
}

static const char all_lanes_dead_timeout_expired[] =
	"All lanes were inactive – was the interconnect media removed?";
static const char tx_out_of_policy[] =
	"Passing lanes on local port do not meet the local link width policy";
static const char no_state_complete[] =
	"State timeout occurred before link partner completed the state";
static const char * const state_complete_reasons[] = {
	[0x00] = "Reason unknown",
	[0x01] = "Link was halted by driver, refer to LinkDownReason",
	[0x02] = "Link partner reported failure",
	[0x10] = "Unable to achieve frame sync on any lane",
	[0x11] =
	  "Unable to find a common bit rate with the link partner",
	[0x12] =
	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
	[0x13] =
	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
	[0x14] = no_state_complete,
	[0x15] =
	  "State timeout occurred before link partner identified equalization presets",
	[0x16] =
	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
	[0x17] = tx_out_of_policy,
	[0x20] = all_lanes_dead_timeout_expired,
	[0x21] =
	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
	[0x22] = no_state_complete,
	[0x23] =
	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
	[0x24] = tx_out_of_policy,
	[0x30] = all_lanes_dead_timeout_expired,
	[0x31] =
	  "State timeout occurred waiting for host to process received frames",
	[0x32] = no_state_complete,
	[0x33] =
	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
	[0x34] = tx_out_of_policy,
};

static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
						     u32 code)
{
	const char *str = NULL;

	if (code < ARRAY_SIZE(state_complete_reasons))
		str = state_complete_reasons[code];

	if (str)
		return str;
	return "Reserved";
}

/* describe the given last state complete frame */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
				  const char *prefix)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 success;
	u32 state;
	u32 reason;
	u32 lanes;

	/*
	 * Decode frame:
	 *  [ 0: 0] - success
	 *  [ 3: 1] - state
	 *  [ 7: 4] - next state timeout
	 *  [15: 8] - reason code
	 *  [31:16] - lanes
	 */
	success = frame & 0x1;
	state = (frame >> 1) & 0x7;
	reason = (frame >> 8) & 0xff;
	lanes = (frame >> 16) & 0xffff;

	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
		   prefix, frame);
	dd_dev_err(dd, " last reported state: %s (0x%x)\n",
		   state_completed_string(state), state);
	dd_dev_err(dd, " state successfully completed: %s\n",
		   success ? "yes" : "no");
	dd_dev_err(dd, " fail reason 0x%x: %s\n",
		   reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
}

/*
 * Read the last state complete frames and explain them. This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI). That is, anywhere between polling and link up.
 */
static void check_lni_states(struct hfi1_pportdata *ppd)
{
	u32 last_local_state;
	u32 last_remote_state;

	read_last_local_state(ppd->dd, &last_local_state);
	read_last_remote_state(ppd->dd, &last_remote_state);

	/*
	 * Don't report anything if there is nothing to report. A value of
	 * 0 means the link was taken down while polling and there was no
	 * training in-process.
	 */
	if (last_local_state == 0 && last_remote_state == 0)
		return;

	decode_state_complete(ppd, last_local_state, "transmitted");
	decode_state_complete(ppd, last_remote_state, "received");
}

/*
 * Helper for set_link_state(). Do not call except from that routine.
 * Expects ppd->hls_mutex to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 pstate, previous_state;
	int ret;
	int do_transition;
	int do_wait;

	previous_state = ppd->host_link_state;
	ppd->host_link_state = HLS_GOING_OFFLINE;
	pstate = read_physical_state(dd);
	if (pstate == PLS_OFFLINE) {
		do_transition = 0;	/* in right state */
		do_wait = 0;		/* ...no need to wait */
	} else if ((pstate & 0xff) == PLS_OFFLINE) {
		do_transition = 0;	/* in an offline transient state */
		do_wait = 1;		/* ...wait for it to settle */
	} else {
		do_transition = 1;	/* need to move to offline */
		do_wait = 1;		/* ...will need to wait */
	}

	if (do_transition) {
		ret = set_physical_link_state(dd,
					      (rem_reason << 8) | PLS_OFFLINE);

		if (ret != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to Offline link state, return %d\n",
				   ret);
			return -EINVAL;
		}
		if (ppd->offline_disabled_reason ==
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
	}

	if (do_wait) {
		/* it can take a while for the link to go down */
		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
		if (ret < 0)
			return ret;
	}

	/* make sure the logical state is also down */
	wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);

	/*
	 * Now in charge of LCB - must be after the physical state is
	 * offline.quiet and before host_link_state is changed.
	 */
	set_host_lcb_access(dd);
	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */

	if (ppd->port_type == PORT_TYPE_QSFP &&
	    ppd->qsfp_info.limiting_active &&
	    qsfp_mod_present(ppd)) {
		int ret;

		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
		if (ret == 0) {
			set_qsfp_tx(ppd, 0);
			release_chip_resource(dd, qsfp_resource(dd));
		} else {
			/* not fatal, but should warn */
			dd_dev_err(dd,
				   "Unable to acquire lock to turn off QSFP TX\n");
		}
	}

	/*
	 * The LNI has a mandatory wait time after the physical state
	 * moves to Offline.Quiet. The wait time may be different
	 * depending on how the link went down. The 8051 firmware
	 * will observe the needed wait time and only move to ready
	 * when that is completed. The largest of the quiet timeouts
	 * is 6s, so wait that long and then at least 0.5s more for
	 * other transitions, and another 0.5s for a buffer.
	 */
	ret = wait_fm_ready(dd, 7000);
	if (ret) {
		dd_dev_err(dd,
			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
		/* state is really offline, so make it so */
		ppd->host_link_state = HLS_DN_OFFLINE;
		return ret;
	}

	/*
	 * The state is now offline and the 8051 is ready to accept host
	 * requests.
	 *	- change our state
	 *	- notify others if we were previously in a linkup state
	 */
	ppd->host_link_state = HLS_DN_OFFLINE;
	if (previous_state & HLS_UP) {
		/* went down while link was up */
		handle_linkup_change(dd, 0);
	} else if (previous_state
			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
		/* went down while attempting link up */
		check_lni_states(ppd);
	}

	/* the active link width (downgrade) is 0 on link down */
	ppd->link_width_active = 0;
	ppd->link_width_downgrade_tx_active = 0;
	ppd->link_width_downgrade_rx_active = 0;
	ppd->current_egress_rate = 0;
	return 0;
}

/* return the link state name */
static const char *link_state_name(u32 state)
{
	const char *name;
	int n = ilog2(state);
	static const char * const names[] = {
		[__HLS_UP_INIT_BP] = "INIT",
		[__HLS_UP_ARMED_BP] = "ARMED",
		[__HLS_UP_ACTIVE_BP] = "ACTIVE",
		[__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
		[__HLS_DN_POLL_BP] = "POLL",
		[__HLS_DN_DISABLE_BP] = "DISABLE",
		[__HLS_DN_OFFLINE_BP] = "OFFLINE",
		[__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
		[__HLS_GOING_UP_BP] = "GOING_UP",
		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
	};

	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
	return name ? name : "unknown";
}

/* return the link state reason name */
static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
{
	if (state == HLS_UP_INIT) {
		switch (ppd->linkinit_reason) {
		case OPA_LINKINIT_REASON_LINKUP:
			return "(LINKUP)";
		case OPA_LINKINIT_REASON_FLAPPING:
			return "(FLAPPING)";
		case OPA_LINKINIT_OUTSIDE_POLICY:
			return "(OUTSIDE_POLICY)";
		case OPA_LINKINIT_QUARANTINED:
			return "(QUARANTINED)";
		case OPA_LINKINIT_INSUFIC_CAPABILITY:
			return "(INSUFIC_CAPABILITY)";
		default:
			break;
		}
	}
	return "";
}

/*
 * driver_physical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
 * Return -1 (converted to a u32) to indicate error.
 */
u32 driver_physical_state(struct hfi1_pportdata *ppd)
{
	switch (ppd->host_link_state) {
	case HLS_UP_INIT:
	case HLS_UP_ARMED:
	case HLS_UP_ACTIVE:
		return IB_PORTPHYSSTATE_LINKUP;
	case HLS_DN_POLL:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_DN_DISABLE:
		return IB_PORTPHYSSTATE_DISABLED;
	case HLS_DN_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_VERIFY_CAP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_UP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_LINK_COOLDOWN:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_DN_DOWNDEF:
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}

/*
 * driver_logical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
 * (converted to a u32) to indicate error.
 */
u32 driver_logical_state(struct hfi1_pportdata *ppd)
{
	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
		return IB_PORT_DOWN;

	switch (ppd->host_link_state & HLS_UP) {
	case HLS_UP_INIT:
		return IB_PORT_INIT;
	case HLS_UP_ARMED:
		return IB_PORT_ARMED;
	case HLS_UP_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}

void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
			  u8 neigh_reason, u8 rem_reason)
{
	if (ppd->local_link_down_reason.latest == 0 &&
	    ppd->neigh_link_down_reason.latest == 0) {
		ppd->local_link_down_reason.latest = lcl_reason;
		ppd->neigh_link_down_reason.latest = neigh_reason;
		ppd->remote_link_down_reason = rem_reason;
	}
}

/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt. It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct ib_event event = {.device = NULL};
	int ret1, ret = 0;
	int orig_new_state, poll_bounce;

	mutex_lock(&ppd->hls_lock);

	orig_new_state = state;
	if (state == HLS_DN_DOWNDEF)
		state = dd->link_default;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
		      state == HLS_DN_POLL;

	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
		    link_state_name(ppd->host_link_state),
		    link_state_name(orig_new_state),
		    poll_bounce ? "(bounce) " : "",
		    link_state_reason_name(ppd, state));

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match. Let a poll to poll link bounce
	 * go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;

	switch (state) {
	case HLS_UP_INIT:
		if (ppd->host_link_state == HLS_DN_POLL &&
		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here.
			 *
			 * Whether in normal or loopback mode, the
			 * simulator jumps from polling to link up.
			 * Accept that here.
			 */
			/* OK */
		} else if (ppd->host_link_state != HLS_GOING_UP) {
			goto unexpected;
		}

		ppd->host_link_state = HLS_UP_INIT;
		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			/* logical state didn't change, stay at going_up */
			ppd->host_link_state = HLS_GOING_UP;
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
		} else {
			/* clear old transient LINKINIT_REASON code */
			if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
				ppd->linkinit_reason =
					OPA_LINKINIT_REASON_LINKUP;

			/* enable the port */
			add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

			handle_linkup_change(dd, 1);
		}
		break;
	case HLS_UP_ARMED:
		if (ppd->host_link_state != HLS_UP_INIT)
			goto unexpected;

		ppd->host_link_state = HLS_UP_ARMED;
		set_logical_state(dd, LSTATE_ARMED);
		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			/* logical state didn't change, stay at init */
			ppd->host_link_state = HLS_UP_INIT;
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
		}
		/*
		 * The simulator does not currently implement SMA messages,
		 * so neighbor_normal is not set. Set it here when we first
		 * move to Armed.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
		break;
	case HLS_UP_ACTIVE:
		if (ppd->host_link_state != HLS_UP_ARMED)
			goto unexpected;

		ppd->host_link_state = HLS_UP_ACTIVE;
		set_logical_state(dd, LSTATE_ACTIVE);
		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			/* logical state didn't change, stay at armed */
			ppd->host_link_state = HLS_UP_ARMED;
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);

			/* Signal the IB layer that the port has gone active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
		break;
	case HLS_DN_POLL:
		if ((ppd->host_link_state == HLS_DN_DISABLE ||
		     ppd->host_link_state == HLS_DN_OFFLINE) &&
		    dd->dc_shutdown)
			dc_start(dd);
		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);

		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			u8 tmp = ppd->link_enabled;

			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret) {
				ppd->link_enabled = tmp;
				break;
			}
			ppd->remote_link_down_reason = 0;

			if (ppd->driver_link_ready)
				ppd->link_enabled = 1;
		}

		set_all_slowpath(ppd->dd);
		ret = set_local_link_attributes(ppd);
		if (ret)
			break;

		ppd->port_error_action = 0;
		ppd->host_link_state = HLS_DN_POLL;

		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
		} else {
			ret1 = set_physical_link_state(dd, PLS_POLLING);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Polling link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
			}
		}
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If an error occurred above, go back to offline. The
		 * caller may reschedule another attempt.
		 */
		if (ret)
			goto_offline(ppd, 0);
		break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret)
				break;
			ppd->remote_link_down_reason = 0;
		}

		ret1 = set_physical_link_state(dd, PLS_DISABLED);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to Disabled link state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_DN_DISABLE;
		dc_shutdown(dd);
		break;
	case HLS_DN_OFFLINE:
		if (ppd->host_link_state == HLS_DN_DISABLE)
			dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
		if (!ret)
			ppd->remote_link_down_reason = 0;
		break;
	case HLS_VERIFY_CAP:
		if (ppd->host_link_state != HLS_DN_POLL)
			goto unexpected;
		ppd->host_link_state = HLS_VERIFY_CAP;
		break;
	case HLS_GOING_UP:
		if (ppd->host_link_state != HLS_VERIFY_CAP)
			goto unexpected;

		ret1 = set_physical_link_state(dd, PLS_LINKUP);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to link up state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_GOING_UP;
		break;

	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
	default:
		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
			    __func__, state);
		ret = -EINVAL;
		break;
	}

	goto done;

unexpected:
	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
		   __func__, link_state_name(ppd->host_link_state),
		   link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}
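
/*
 * Apply a driver IB config change (HFI1_IB_CFG_*). Items that are not
 * implemented are only logged when the PRINT_UNIMPL capability is set.
 */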
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
	u64 reg;
	int ret = 0;

	switch (which) {
	case HFI1_IB_CFG_LIDLMC:
		set_lidlmc(ppd);
		break;
	case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes, while HFI stores it in units of 64 bytes.
		 */
		val *= 4096 / 64;
		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
			ret = -EINVAL;
		break;
	case HFI1_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			if (!ppd->port)
				ret = -EINVAL;
		}
		break;
	/*
	 * For link width, link width downgrade, and speed enable, always AND
	 * the setting with what is actually supported. This has two benefits.
	 * First, enabled can't have unsupported values, no matter what the
	 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
	 * "fill in with your supported value" have all the bits in the
	 * field set, so simply ANDing with supported has the desired result.
	 */
	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val & ppd->link_width_supported;
		break;
	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
		ppd->link_width_downgrade_enabled =
				val & ppd->link_width_downgrade_supported;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		ppd->link_speed_enabled = val & ppd->link_speed_supported;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;
	case HFI1_IB_CFG_MTU:
		set_send_length(ppd);
		break;
	case HFI1_IB_CFG_PKEYS:
		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
			set_partition_keys(ppd);
		break;
	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(ppd->dd,
				    "%s: which %s, val 0x%x: not implemented\n",
				    __func__, ib_cfg_name(which), val);
		break;
	}

	return ret;
}

/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
	int i;

	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
			VL_ARB_LOW_PRIO_TABLE_SIZE);
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
			VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'. This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date. But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */

	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
}

/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
		return NULL;
	spin_lock(&ppd->vl_arb_cache[idx].lock);
	return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}

static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static int vl_arb_match_cache(struct vl_arb_cache *cache,
			      struct ib_vl_weight_elem *vl)
{
	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* end functions related to vl arbitration table caching */
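
/*
 * Write 'size' VL arbitration entries to the low or high priority list
 * selected by 'target', draining the data VLs first when the link is up
 * on non-A0 hardware so no packet is stranded by a weight change.
 */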
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(
			dd,
			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
			__func__);
		goto err;
	}

	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
			   struct vl_limit *vll)
{
	u64 reg = read_csr(dd, csr);

	vll->dedicated = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
	vll->shared = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}

/*
 * Read the current credit merge limits.
 */
static int get_buffer_control(struct hfi1_devdata *dd,
			      struct buffer_control *bc, u16 *overall_limit)
{
	u64 reg;
	int i;

	/* not all entries are filled in */
	memset(bc, 0, sizeof(*bc));

	/* OPA and HFI have a 1-1 mapping */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);

	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	bc->overall_shared_limit = cpu_to_be16(
		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
	if (overall_limit)
		*overall_limit = (reg
			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
	return sizeof(struct buffer_control);
}

static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	u64 reg;
	int i;

	/* each register contains 16 SC->VLnt mappings, 4 bits each */
	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[2 * i] = byte & 0xf;
		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
	}

	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[16 + (2 * i)] = byte & 0xf;
		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
	}
	return sizeof(struct sc2vlnt);
}

static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
			      struct ib_vl_weight_elem *vl)
{
	unsigned int i;

	for (i = 0; i < nelems; i++, vl++) {
		vl->vl = 0xf;
		vl->weight = 0;
	}
}

static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
		  DC_SC_VL_VAL(15_0,
			       0, dp->vlnt[0] & 0xf,
			       1, dp->vlnt[1] & 0xf,
			       2, dp->vlnt[2] & 0xf,
			       3, dp->vlnt[3] & 0xf,
			       4, dp->vlnt[4] & 0xf,
			       5, dp->vlnt[5] & 0xf,
			       6, dp->vlnt[6] & 0xf,
			       7, dp->vlnt[7] & 0xf,
			       8, dp->vlnt[8] & 0xf,
			       9, dp->vlnt[9] & 0xf,
			       10, dp->vlnt[10] & 0xf,
			       11, dp->vlnt[11] & 0xf,
			       12, dp->vlnt[12] & 0xf,
			       13, dp->vlnt[13] & 0xf,
			       14, dp->vlnt[14] & 0xf,
			       15, dp->vlnt[15] & 0xf));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
		  DC_SC_VL_VAL(31_16,
			       16, dp->vlnt[16] & 0xf,
			       17, dp->vlnt[17] & 0xf,
			       18, dp->vlnt[18] & 0xf,
			       19, dp->vlnt[19] & 0xf,
			       20, dp->vlnt[20] & 0xf,
			       21, dp->vlnt[21] & 0xf,
			       22, dp->vlnt[22] & 0xf,
			       23, dp->vlnt[23] & 0xf,
			       24, dp->vlnt[24] & 0xf,
			       25, dp->vlnt[25] & 0xf,
			       26, dp->vlnt[26] & 0xf,
			       27, dp->vlnt[27] & 0xf,
			       28, dp->vlnt[28] & 0xf,
			       29, dp->vlnt[29] & 0xf,
			       30, dp->vlnt[30] & 0xf,
			       31, dp->vlnt[31] & 0xf));
}

static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
			u16 limit)
{
	if (limit != 0)
		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
			    what, (int)limit, idx);
}

/* change only the shared limit portion of SendCmGlobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* change only the total credit limit portion of SendCmGlobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}

/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}

/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
				     const char *which)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
	while (1) {
		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

		if (reg == 0)
			return;	/* success */
		if (time_after(jiffies, timeout))
			break;	/* timed out */
		udelay(1);
	}

	dd_dev_err(dd,
		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
	/*
	 * If this occurs, it is likely there was a credit loss on the link.
	 * The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
}
  9572. /*
  9573. * The number of credits on the VLs may be changed while everything
  9574. * is "live", but the following algorithm must be followed due to
  9575. * how the hardware is actually implemented. In particular,
  9576. * Return_Credit_Status[] is the only correct status check.
  9577. *
  9578. * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
  9579. * set Global_Shared_Credit_Limit = 0
  9580. * use_all_vl = 1
  9581. * mask0 = all VLs that are changing either dedicated or shared limits
  9582. * set Shared_Limit[mask0] = 0
  9583. * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
  9584. * if (changing any dedicated limit)
  9585. * mask1 = all VLs that are lowering dedicated limits
  9586. * lower Dedicated_Limit[mask1]
  9587. * spin until Return_Credit_Status[mask1] == 0
  9588. * raise Dedicated_Limits
  9589. * raise Shared_Limits
  9590. * raise Global_Shared_Credit_Limit
  9591. *
9592. * lower = if the new limit is lower, set the limit to the new value
9593. * raise = if the new limit is higher than the current value (which may have
9594. * been changed earlier in the algorithm), set the limit to the new value
  9595. */
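/*
 * A small worked example of the lower/raise rules above (all numbers
 * invented for illustration): suppose VL0's dedicated limit drops from
 * 100 to 60 while VL1's rises from 40 to 80. The "lower" pass writes
 * only VL0 (60) and spins until Return_Credit_Status[VL0] clears; the
 * "raise" pass then writes only VL1 (80). Shared limits and the global
 * shared limit are raised last, in that order, per the algorithm above.
 */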
  9596. int set_buffer_control(struct hfi1_pportdata *ppd,
  9597. struct buffer_control *new_bc)
  9598. {
  9599. struct hfi1_devdata *dd = ppd->dd;
  9600. u64 changing_mask, ld_mask, stat_mask;
  9601. int change_count;
  9602. int i, use_all_mask;
  9603. int this_shared_changing;
  9604. int vl_count = 0, ret;
  9605. /*
9606. * A0: the variable any_shared_limit_changing below supplements the
9607. * algorithm above. If A0 support is removed, it can be removed as well.
  9608. */
  9609. int any_shared_limit_changing;
  9610. struct buffer_control cur_bc;
  9611. u8 changing[OPA_MAX_VLS];
  9612. u8 lowering_dedicated[OPA_MAX_VLS];
  9613. u16 cur_total;
  9614. u32 new_total = 0;
  9615. const u64 all_mask =
  9616. SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
  9617. | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
  9618. | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
  9619. | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
  9620. | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
  9621. | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
  9622. | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
  9623. | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
  9624. | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
  9625. #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
  9626. #define NUM_USABLE_VLS 16 /* look at VL15 and less */
  9627. /* find the new total credits, do sanity check on unused VLs */
  9628. for (i = 0; i < OPA_MAX_VLS; i++) {
  9629. if (valid_vl(i)) {
  9630. new_total += be16_to_cpu(new_bc->vl[i].dedicated);
  9631. continue;
  9632. }
  9633. nonzero_msg(dd, i, "dedicated",
  9634. be16_to_cpu(new_bc->vl[i].dedicated));
  9635. nonzero_msg(dd, i, "shared",
  9636. be16_to_cpu(new_bc->vl[i].shared));
  9637. new_bc->vl[i].dedicated = 0;
  9638. new_bc->vl[i].shared = 0;
  9639. }
  9640. new_total += be16_to_cpu(new_bc->overall_shared_limit);
  9641. /* fetch the current values */
  9642. get_buffer_control(dd, &cur_bc, &cur_total);
  9643. /*
  9644. * Create the masks we will use.
  9645. */
  9646. memset(changing, 0, sizeof(changing));
  9647. memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
  9648. /*
  9649. * NOTE: Assumes that the individual VL bits are adjacent and in
  9650. * increasing order
  9651. */
  9652. stat_mask =
  9653. SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
  9654. changing_mask = 0;
  9655. ld_mask = 0;
  9656. change_count = 0;
  9657. any_shared_limit_changing = 0;
  9658. for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
  9659. if (!valid_vl(i))
  9660. continue;
  9661. this_shared_changing = new_bc->vl[i].shared
  9662. != cur_bc.vl[i].shared;
  9663. if (this_shared_changing)
  9664. any_shared_limit_changing = 1;
  9665. if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
  9666. this_shared_changing) {
  9667. changing[i] = 1;
  9668. changing_mask |= stat_mask;
  9669. change_count++;
  9670. }
  9671. if (be16_to_cpu(new_bc->vl[i].dedicated) <
  9672. be16_to_cpu(cur_bc.vl[i].dedicated)) {
  9673. lowering_dedicated[i] = 1;
  9674. ld_mask |= stat_mask;
  9675. }
  9676. }
  9677. /* bracket the credit change with a total adjustment */
  9678. if (new_total > cur_total)
  9679. set_global_limit(dd, new_total);
  9680. /*
  9681. * Start the credit change algorithm.
  9682. */
  9683. use_all_mask = 0;
  9684. if ((be16_to_cpu(new_bc->overall_shared_limit) <
  9685. be16_to_cpu(cur_bc.overall_shared_limit)) ||
  9686. (is_ax(dd) && any_shared_limit_changing)) {
  9687. set_global_shared(dd, 0);
  9688. cur_bc.overall_shared_limit = 0;
  9689. use_all_mask = 1;
  9690. }
  9691. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9692. if (!valid_vl(i))
  9693. continue;
  9694. if (changing[i]) {
  9695. set_vl_shared(dd, i, 0);
  9696. cur_bc.vl[i].shared = 0;
  9697. }
  9698. }
  9699. wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
  9700. "shared");
  9701. if (change_count > 0) {
  9702. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9703. if (!valid_vl(i))
  9704. continue;
  9705. if (lowering_dedicated[i]) {
  9706. set_vl_dedicated(dd, i,
  9707. be16_to_cpu(new_bc->
  9708. vl[i].dedicated));
  9709. cur_bc.vl[i].dedicated =
  9710. new_bc->vl[i].dedicated;
  9711. }
  9712. }
  9713. wait_for_vl_status_clear(dd, ld_mask, "dedicated");
  9714. /* now raise all dedicated that are going up */
  9715. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9716. if (!valid_vl(i))
  9717. continue;
  9718. if (be16_to_cpu(new_bc->vl[i].dedicated) >
  9719. be16_to_cpu(cur_bc.vl[i].dedicated))
  9720. set_vl_dedicated(dd, i,
  9721. be16_to_cpu(new_bc->
  9722. vl[i].dedicated));
  9723. }
  9724. }
  9725. /* next raise all shared that are going up */
  9726. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9727. if (!valid_vl(i))
  9728. continue;
  9729. if (be16_to_cpu(new_bc->vl[i].shared) >
  9730. be16_to_cpu(cur_bc.vl[i].shared))
  9731. set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
  9732. }
  9733. /* finally raise the global shared */
  9734. if (be16_to_cpu(new_bc->overall_shared_limit) >
  9735. be16_to_cpu(cur_bc.overall_shared_limit))
  9736. set_global_shared(dd,
  9737. be16_to_cpu(new_bc->overall_shared_limit));
  9738. /* bracket the credit change with a total adjustment */
  9739. if (new_total < cur_total)
  9740. set_global_limit(dd, new_total);
  9741. /*
9742. * Determine the actual number of operational VLs using the number of
  9743. * dedicated and shared credits for each VL.
  9744. */
  9745. if (change_count > 0) {
  9746. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  9747. if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
  9748. be16_to_cpu(new_bc->vl[i].shared) > 0)
  9749. vl_count++;
  9750. ppd->actual_vls_operational = vl_count;
  9751. ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
  9752. ppd->actual_vls_operational :
  9753. ppd->vls_operational,
  9754. NULL);
  9755. if (ret == 0)
  9756. ret = pio_map_init(dd, ppd->port - 1, vl_count ?
  9757. ppd->actual_vls_operational :
  9758. ppd->vls_operational, NULL);
  9759. if (ret)
  9760. return ret;
  9761. }
  9762. return 0;
  9763. }
  9764. /*
  9765. * Read the given fabric manager table. Return the size of the
  9766. * table (in bytes) on success, and a negative error code on
  9767. * failure.
  9768. */
  9769. int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
  9770. {
  9771. int size;
  9772. struct vl_arb_cache *vlc;
  9773. switch (which) {
  9774. case FM_TBL_VL_HIGH_ARB:
  9775. size = 256;
  9776. /*
  9777. * OPA specifies 128 elements (of 2 bytes each), though
  9778. * HFI supports only 16 elements in h/w.
  9779. */
  9780. vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
  9781. vl_arb_get_cache(vlc, t);
  9782. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  9783. break;
  9784. case FM_TBL_VL_LOW_ARB:
  9785. size = 256;
  9786. /*
  9787. * OPA specifies 128 elements (of 2 bytes each), though
  9788. * HFI supports only 16 elements in h/w.
  9789. */
  9790. vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
  9791. vl_arb_get_cache(vlc, t);
  9792. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  9793. break;
  9794. case FM_TBL_BUFFER_CONTROL:
  9795. size = get_buffer_control(ppd->dd, t, NULL);
  9796. break;
  9797. case FM_TBL_SC2VLNT:
  9798. size = get_sc2vlnt(ppd->dd, t);
  9799. break;
  9800. case FM_TBL_VL_PREEMPT_ELEMS:
  9801. size = 256;
  9802. /* OPA specifies 128 elements, of 2 bytes each */
  9803. get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
  9804. break;
  9805. case FM_TBL_VL_PREEMPT_MATRIX:
  9806. size = 256;
  9807. /*
  9808. * OPA specifies that this is the same size as the VL
  9809. * arbitration tables (i.e., 256 bytes).
  9810. */
  9811. break;
  9812. default:
  9813. return -EINVAL;
  9814. }
  9815. return size;
  9816. }
  9817. /*
  9818. * Write the given fabric manager table.
  9819. */
  9820. int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
  9821. {
  9822. int ret = 0;
  9823. struct vl_arb_cache *vlc;
  9824. switch (which) {
  9825. case FM_TBL_VL_HIGH_ARB:
  9826. vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
  9827. if (vl_arb_match_cache(vlc, t)) {
  9828. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  9829. break;
  9830. }
  9831. vl_arb_set_cache(vlc, t);
  9832. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  9833. ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
  9834. VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
  9835. break;
  9836. case FM_TBL_VL_LOW_ARB:
  9837. vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
  9838. if (vl_arb_match_cache(vlc, t)) {
  9839. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  9840. break;
  9841. }
  9842. vl_arb_set_cache(vlc, t);
  9843. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  9844. ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
  9845. VL_ARB_LOW_PRIO_TABLE_SIZE, t);
  9846. break;
  9847. case FM_TBL_BUFFER_CONTROL:
  9848. ret = set_buffer_control(ppd, t);
  9849. break;
  9850. case FM_TBL_SC2VLNT:
  9851. set_sc2vlnt(ppd->dd, t);
  9852. break;
  9853. default:
  9854. ret = -EINVAL;
  9855. }
  9856. return ret;
  9857. }
  9858. /*
  9859. * Disable all data VLs.
  9860. *
  9861. * Return 0 if disabled, non-zero if the VLs cannot be disabled.
  9862. */
  9863. static int disable_data_vls(struct hfi1_devdata *dd)
  9864. {
  9865. if (is_ax(dd))
  9866. return 1;
  9867. pio_send_control(dd, PSC_DATA_VL_DISABLE);
  9868. return 0;
  9869. }
  9870. /*
  9871. * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
  9872. * Just re-enables all data VLs (the "fill" part happens
  9873. * automatically - the name was chosen for symmetry with
  9874. * stop_drain_data_vls()).
  9875. *
  9876. * Return 0 if successful, non-zero if the VLs cannot be enabled.
  9877. */
  9878. int open_fill_data_vls(struct hfi1_devdata *dd)
  9879. {
  9880. if (is_ax(dd))
  9881. return 1;
  9882. pio_send_control(dd, PSC_DATA_VL_ENABLE);
  9883. return 0;
  9884. }
  9885. /*
9886. * drain_data_vls() - assumes that disable_data_vls() has been called,
9887. * then waits for the occupancy (of per-VL FIFOs) of all contexts and
9888. * SDMA engines to drop to 0.
  9889. */
  9890. static void drain_data_vls(struct hfi1_devdata *dd)
  9891. {
  9892. sc_wait(dd);
  9893. sdma_wait(dd);
  9894. pause_for_credit_return(dd);
  9895. }
  9896. /*
  9897. * stop_drain_data_vls() - disable, then drain all per-VL fifos.
  9898. *
  9899. * Use open_fill_data_vls() to resume using data VLs. This pair is
  9900. * meant to be used like this:
  9901. *
  9902. * stop_drain_data_vls(dd);
  9903. * // do things with per-VL resources
  9904. * open_fill_data_vls(dd);
  9905. */
  9906. int stop_drain_data_vls(struct hfi1_devdata *dd)
  9907. {
  9908. int ret;
  9909. ret = disable_data_vls(dd);
  9910. if (ret == 0)
  9911. drain_data_vls(dd);
  9912. return ret;
  9913. }
  9914. /*
  9915. * Convert a nanosecond time to a cclock count. No matter how slow
  9916. * the cclock, a non-zero ns will always have a non-zero result.
  9917. */
  9918. u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
  9919. {
  9920. u32 cclocks;
  9921. if (dd->icode == ICODE_FPGA_EMULATION)
  9922. cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
  9923. else /* simulation pretends to be ASIC */
  9924. cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
  9925. if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
  9926. cclocks = 1;
  9927. return cclocks;
  9928. }
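/*
 * Illustration only: assume (purely for the example) a cclock period of
 * 2000 ps - the real period comes from ASIC_CCLOCK_PS or FPGA_CCLOCK_PS.
 * Then 1000 ns converts to (1000 * 1000) / 2000 = 500 cclocks, while
 * 1 ns computes to 0 and is bumped to 1 by the check above, preserving
 * the non-zero-in, non-zero-out guarantee.
 */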
  9929. /*
9930. * Convert a cclock count to nanoseconds. No matter how slow
9931. * the cclock, a non-zero cclock count will always have a non-zero result.
  9932. */
  9933. u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
  9934. {
  9935. u32 ns;
  9936. if (dd->icode == ICODE_FPGA_EMULATION)
  9937. ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
  9938. else /* simulation pretends to be ASIC */
  9939. ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
  9940. if (cclocks && !ns)
  9941. ns = 1;
  9942. return ns;
  9943. }
  9944. /*
  9945. * Dynamically adjust the receive interrupt timeout for a context based on
  9946. * incoming packet rate.
  9947. *
  9948. * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
  9949. */
  9950. static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
  9951. {
  9952. struct hfi1_devdata *dd = rcd->dd;
  9953. u32 timeout = rcd->rcvavail_timeout;
  9954. /*
9955. * This algorithm doubles or halves the timeout depending on whether
9956. * the number of packets received in this interrupt was less than, or
9957. * greater than or equal to, the interrupt count.
9958. *
9959. * The calculations below do not allow a steady state to be achieved.
9960. * Only at the endpoints is it possible to have an unchanging
9961. * timeout.
  9962. */
  9963. if (npkts < rcv_intr_count) {
  9964. /*
  9965. * Not enough packets arrived before the timeout, adjust
  9966. * timeout downward.
  9967. */
  9968. if (timeout < 2) /* already at minimum? */
  9969. return;
  9970. timeout >>= 1;
  9971. } else {
  9972. /*
  9973. * More than enough packets arrived before the timeout, adjust
  9974. * timeout upward.
  9975. */
  9976. if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
  9977. return;
  9978. timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
  9979. }
  9980. rcd->rcvavail_timeout = timeout;
  9981. /*
  9982. * timeout cannot be larger than rcv_intr_timeout_csr which has already
  9983. * been verified to be in range
  9984. */
  9985. write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
  9986. (u64)timeout <<
  9987. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
  9988. }
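/*
 * Illustration of the doubling/halving above (numbers chosen only for
 * the example): with rcv_intr_count of 16, an interrupt that saw 8
 * packets halves the timeout (e.g. 64 -> 32), while one that saw 20
 * packets doubles it (64 -> 128), capped at dd->rcv_intr_timeout_csr
 * and never dropping below 1.
 */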
  9989. void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
  9990. u32 intr_adjust, u32 npkts)
  9991. {
  9992. struct hfi1_devdata *dd = rcd->dd;
  9993. u64 reg;
  9994. u32 ctxt = rcd->ctxt;
  9995. /*
  9996. * Need to write timeout register before updating RcvHdrHead to ensure
  9997. * that a new value is used when the HW decides to restart counting.
  9998. */
  9999. if (intr_adjust)
  10000. adjust_rcv_timeout(rcd, npkts);
  10001. if (updegr) {
  10002. reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
  10003. << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
  10004. write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
  10005. }
  10006. mmiowb();
  10007. reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
  10008. (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
  10009. << RCV_HDR_HEAD_HEAD_SHIFT);
  10010. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
  10011. mmiowb();
  10012. }
  10013. u32 hdrqempty(struct hfi1_ctxtdata *rcd)
  10014. {
  10015. u32 head, tail;
  10016. head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
  10017. & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
  10018. if (rcd->rcvhdrtail_kvaddr)
  10019. tail = get_rcvhdrtail(rcd);
  10020. else
  10021. tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
  10022. return head == tail;
  10023. }
  10024. /*
  10025. * Context Control and Receive Array encoding for buffer size:
  10026. * 0x0 invalid
  10027. * 0x1 4 KB
  10028. * 0x2 8 KB
  10029. * 0x3 16 KB
  10030. * 0x4 32 KB
  10031. * 0x5 64 KB
  10032. * 0x6 128 KB
  10033. * 0x7 256 KB
  10034. * 0x8 512 KB (Receive Array only)
  10035. * 0x9 1 MB (Receive Array only)
  10036. * 0xa 2 MB (Receive Array only)
  10037. *
  10038. * 0xB-0xF - reserved (Receive Array only)
  10039. *
  10040. *
  10041. * This routine assumes that the value has already been sanity checked.
  10042. */
  10043. static u32 encoded_size(u32 size)
  10044. {
  10045. switch (size) {
  10046. case 4 * 1024: return 0x1;
  10047. case 8 * 1024: return 0x2;
  10048. case 16 * 1024: return 0x3;
  10049. case 32 * 1024: return 0x4;
  10050. case 64 * 1024: return 0x5;
  10051. case 128 * 1024: return 0x6;
  10052. case 256 * 1024: return 0x7;
  10053. case 512 * 1024: return 0x8;
  10054. case 1 * 1024 * 1024: return 0x9;
  10055. case 2 * 1024 * 1024: return 0xa;
  10056. }
  10057. return 0x1; /* if invalid, go with the minimum size */
  10058. }
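/*
 * Illustrative use of the table above: encoded_size(64 * 1024) returns
 * 0x5 and encoded_size(2 * 1024 * 1024) returns 0xa; any size not in
 * the table falls back to 0x1 (4 KB).
 */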
  10059. void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
  10060. {
  10061. struct hfi1_ctxtdata *rcd;
  10062. u64 rcvctrl, reg;
  10063. int did_enable = 0;
  10064. rcd = dd->rcd[ctxt];
  10065. if (!rcd)
  10066. return;
  10067. hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
  10068. rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
  10069. /* if the context already enabled, don't do the extra steps */
  10070. if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
  10071. !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
  10072. /* reset the tail and hdr addresses, and sequence count */
  10073. write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
  10074. rcd->rcvhdrq_phys);
  10075. if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
  10076. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10077. rcd->rcvhdrqtailaddr_phys);
  10078. rcd->seq_cnt = 1;
  10079. /* reset the cached receive header queue head value */
  10080. rcd->head = 0;
  10081. /*
  10082. * Zero the receive header queue so we don't get false
  10083. * positives when checking the sequence number. The
  10084. * sequence numbers could land exactly on the same spot.
10085. * E.g. an rcd restart before the receive header queue wrapped.
  10086. */
  10087. memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
  10088. /* starting timeout */
  10089. rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
  10090. /* enable the context */
  10091. rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
  10092. /* clean the egr buffer size first */
  10093. rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
  10094. rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
  10095. & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
  10096. << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
  10097. /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
  10098. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
  10099. did_enable = 1;
  10100. /* zero RcvEgrIndexHead */
  10101. write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
  10102. /* set eager count and base index */
  10103. reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
  10104. & RCV_EGR_CTRL_EGR_CNT_MASK)
  10105. << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
  10106. (((rcd->eager_base >> RCV_SHIFT)
  10107. & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
  10108. << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
  10109. write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
  10110. /*
  10111. * Set TID (expected) count and base index.
  10112. * rcd->expected_count is set to individual RcvArray entries,
  10113. * not pairs, and the CSR takes a pair-count in groups of
  10114. * four, so divide by 8.
  10115. */
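/*
 * For example (count invented for illustration): an expected_count of
 * 2048 RcvArray entries is 1024 pairs, written in groups of four pairs,
 * so the field value programmed below is 2048 / 8 = 256.
 */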
  10116. reg = (((rcd->expected_count >> RCV_SHIFT)
  10117. & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
  10118. << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
  10119. (((rcd->expected_base >> RCV_SHIFT)
  10120. & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
  10121. << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
  10122. write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
  10123. if (ctxt == HFI1_CTRL_CTXT)
  10124. write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
  10125. }
  10126. if (op & HFI1_RCVCTRL_CTXT_DIS) {
  10127. write_csr(dd, RCV_VL15, 0);
  10128. /*
10129. * When a receive context is being disabled, turn on tail
10130. * update with a dummy tail address and then disable the
10131. * receive context.
  10132. */
  10133. if (dd->rcvhdrtail_dummy_physaddr) {
  10134. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10135. dd->rcvhdrtail_dummy_physaddr);
  10136. /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
  10137. rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10138. }
  10139. rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
  10140. }
  10141. if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
  10142. rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
  10143. if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
  10144. rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
  10145. if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
  10146. rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10147. if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
  10148. /* See comment on RcvCtxtCtrl.TailUpd above */
  10149. if (!(op & HFI1_RCVCTRL_CTXT_DIS))
  10150. rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10151. }
  10152. if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
  10153. rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
  10154. if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
  10155. rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
  10156. if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
  10157. /*
  10158. * In one-packet-per-eager mode, the size comes from
  10159. * the RcvArray entry.
  10160. */
  10161. rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
  10162. rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
  10163. }
  10164. if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
  10165. rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
  10166. if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
  10167. rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
  10168. if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
  10169. rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
  10170. if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
  10171. rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
  10172. if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
  10173. rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
  10174. rcd->rcvctrl = rcvctrl;
  10175. hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
  10176. write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
  10177. /* work around sticky RcvCtxtStatus.BlockedRHQFull */
  10178. if (did_enable &&
  10179. (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
  10180. reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
  10181. if (reg != 0) {
  10182. dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
  10183. ctxt, reg);
  10184. read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
  10185. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
  10186. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
  10187. read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
  10188. reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
  10189. dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
  10190. ctxt, reg, reg == 0 ? "not" : "still");
  10191. }
  10192. }
  10193. if (did_enable) {
  10194. /*
  10195. * The interrupt timeout and count must be set after
  10196. * the context is enabled to take effect.
  10197. */
  10198. /* set interrupt timeout */
  10199. write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
  10200. (u64)rcd->rcvavail_timeout <<
  10201. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
  10202. /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
  10203. reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
  10204. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
  10205. }
  10206. if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
  10207. /*
  10208. * If the context has been disabled and the Tail Update has
10209. * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
10210. * so it doesn't contain an invalid address.
  10211. */
  10212. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10213. dd->rcvhdrtail_dummy_physaddr);
  10214. }
  10215. u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
  10216. {
  10217. int ret;
  10218. u64 val = 0;
  10219. if (namep) {
  10220. ret = dd->cntrnameslen;
  10221. *namep = dd->cntrnames;
  10222. } else {
  10223. const struct cntr_entry *entry;
  10224. int i, j;
  10225. ret = (dd->ndevcntrs) * sizeof(u64);
  10226. /* Get the start of the block of counters */
  10227. *cntrp = dd->cntrs;
  10228. /*
  10229. * Now go and fill in each counter in the block.
  10230. */
  10231. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10232. entry = &dev_cntrs[i];
  10233. hfi1_cdbg(CNTR, "reading %s", entry->name);
  10234. if (entry->flags & CNTR_DISABLED) {
  10235. /* Nothing */
  10236. hfi1_cdbg(CNTR, "\tDisabled\n");
  10237. } else {
  10238. if (entry->flags & CNTR_VL) {
  10239. hfi1_cdbg(CNTR, "\tPer VL\n");
  10240. for (j = 0; j < C_VL_COUNT; j++) {
  10241. val = entry->rw_cntr(entry,
  10242. dd, j,
  10243. CNTR_MODE_R,
  10244. 0);
  10245. hfi1_cdbg(
  10246. CNTR,
  10247. "\t\tRead 0x%llx for %d\n",
  10248. val, j);
  10249. dd->cntrs[entry->offset + j] =
  10250. val;
  10251. }
  10252. } else if (entry->flags & CNTR_SDMA) {
  10253. hfi1_cdbg(CNTR,
  10254. "\t Per SDMA Engine\n");
  10255. for (j = 0; j < dd->chip_sdma_engines;
  10256. j++) {
  10257. val =
  10258. entry->rw_cntr(entry, dd, j,
  10259. CNTR_MODE_R, 0);
  10260. hfi1_cdbg(CNTR,
  10261. "\t\tRead 0x%llx for %d\n",
  10262. val, j);
  10263. dd->cntrs[entry->offset + j] =
  10264. val;
  10265. }
  10266. } else {
  10267. val = entry->rw_cntr(entry, dd,
  10268. CNTR_INVALID_VL,
  10269. CNTR_MODE_R, 0);
  10270. dd->cntrs[entry->offset] = val;
  10271. hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
  10272. }
  10273. }
  10274. }
  10275. }
  10276. return ret;
  10277. }
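/*
 * A sketch of the two call modes (not a quote of any actual caller):
 * pass a non-NULL namep to get the newline-separated name list, or a
 * NULL namep with a non-NULL cntrp to get freshly read values:
 *
 *	char *names;
 *	u64 *vals;
 *	u32 name_bytes = hfi1_read_cntrs(dd, &names, NULL);
 *	u32 val_bytes = hfi1_read_cntrs(dd, NULL, &vals);
 *
 * Both buffers are owned by the driver; the return value is their size
 * in bytes.
 */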
  10278. /*
  10279. * Used by sysfs to create files for hfi stats to read
  10280. */
  10281. u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
  10282. {
  10283. int ret;
  10284. u64 val = 0;
  10285. if (namep) {
  10286. ret = ppd->dd->portcntrnameslen;
  10287. *namep = ppd->dd->portcntrnames;
  10288. } else {
  10289. const struct cntr_entry *entry;
  10290. int i, j;
  10291. ret = ppd->dd->nportcntrs * sizeof(u64);
  10292. *cntrp = ppd->cntrs;
  10293. for (i = 0; i < PORT_CNTR_LAST; i++) {
  10294. entry = &port_cntrs[i];
  10295. hfi1_cdbg(CNTR, "reading %s", entry->name);
  10296. if (entry->flags & CNTR_DISABLED) {
  10297. /* Nothing */
  10298. hfi1_cdbg(CNTR, "\tDisabled\n");
  10299. continue;
  10300. }
  10301. if (entry->flags & CNTR_VL) {
  10302. hfi1_cdbg(CNTR, "\tPer VL");
  10303. for (j = 0; j < C_VL_COUNT; j++) {
  10304. val = entry->rw_cntr(entry, ppd, j,
  10305. CNTR_MODE_R,
  10306. 0);
  10307. hfi1_cdbg(
  10308. CNTR,
  10309. "\t\tRead 0x%llx for %d",
  10310. val, j);
  10311. ppd->cntrs[entry->offset + j] = val;
  10312. }
  10313. } else {
  10314. val = entry->rw_cntr(entry, ppd,
  10315. CNTR_INVALID_VL,
  10316. CNTR_MODE_R,
  10317. 0);
  10318. ppd->cntrs[entry->offset] = val;
  10319. hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
  10320. }
  10321. }
  10322. }
  10323. return ret;
  10324. }
  10325. static void free_cntrs(struct hfi1_devdata *dd)
  10326. {
  10327. struct hfi1_pportdata *ppd;
  10328. int i;
  10329. if (dd->synth_stats_timer.data)
  10330. del_timer_sync(&dd->synth_stats_timer);
  10331. dd->synth_stats_timer.data = 0;
  10332. ppd = (struct hfi1_pportdata *)(dd + 1);
  10333. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10334. kfree(ppd->cntrs);
  10335. kfree(ppd->scntrs);
  10336. free_percpu(ppd->ibport_data.rvp.rc_acks);
  10337. free_percpu(ppd->ibport_data.rvp.rc_qacks);
  10338. free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
  10339. ppd->cntrs = NULL;
  10340. ppd->scntrs = NULL;
  10341. ppd->ibport_data.rvp.rc_acks = NULL;
  10342. ppd->ibport_data.rvp.rc_qacks = NULL;
  10343. ppd->ibport_data.rvp.rc_delayed_comp = NULL;
  10344. }
  10345. kfree(dd->portcntrnames);
  10346. dd->portcntrnames = NULL;
  10347. kfree(dd->cntrs);
  10348. dd->cntrs = NULL;
  10349. kfree(dd->scntrs);
  10350. dd->scntrs = NULL;
  10351. kfree(dd->cntrnames);
  10352. dd->cntrnames = NULL;
  10353. }
  10354. static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
  10355. u64 *psval, void *context, int vl)
  10356. {
  10357. u64 val;
  10358. u64 sval = *psval;
  10359. if (entry->flags & CNTR_DISABLED) {
  10360. dd_dev_err(dd, "Counter %s not enabled", entry->name);
  10361. return 0;
  10362. }
  10363. hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
  10364. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
10365. /* If it's a synthetic counter there is more work we need to do */
  10366. if (entry->flags & CNTR_SYNTH) {
  10367. if (sval == CNTR_MAX) {
10368. /* No need to read an already saturated counter */
  10369. return CNTR_MAX;
  10370. }
  10371. if (entry->flags & CNTR_32BIT) {
  10372. /* 32bit counters can wrap multiple times */
  10373. u64 upper = sval >> 32;
  10374. u64 lower = (sval << 32) >> 32;
  10375. if (lower > val) { /* hw wrapped */
  10376. if (upper == CNTR_32BIT_MAX)
  10377. val = CNTR_MAX;
  10378. else
  10379. upper++;
  10380. }
  10381. if (val != CNTR_MAX)
  10382. val = (upper << 32) | val;
  10383. } else {
  10384. /* If we rolled we are saturated */
  10385. if ((val < sval) || (val > CNTR_MAX))
  10386. val = CNTR_MAX;
  10387. }
  10388. }
  10389. *psval = val;
  10390. hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
  10391. return val;
  10392. }
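/*
 * Example of the 32-bit wrap handling above (values invented for
 * illustration): if the saved value sval is 0x100000010 (upper 0x1,
 * lower 0x10) and the hardware now reads val = 0x4, then lower > val
 * signals a wrap, upper becomes 0x2, and the returned value is
 * 0x200000004.
 */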
  10393. static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
  10394. struct cntr_entry *entry,
  10395. u64 *psval, void *context, int vl, u64 data)
  10396. {
  10397. u64 val;
  10398. if (entry->flags & CNTR_DISABLED) {
  10399. dd_dev_err(dd, "Counter %s not enabled", entry->name);
  10400. return 0;
  10401. }
  10402. hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
  10403. if (entry->flags & CNTR_SYNTH) {
  10404. *psval = data;
  10405. if (entry->flags & CNTR_32BIT) {
  10406. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
  10407. (data << 32) >> 32);
  10408. val = data; /* return the full 64bit value */
  10409. } else {
  10410. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
  10411. data);
  10412. }
  10413. } else {
  10414. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
  10415. }
  10416. *psval = val;
  10417. hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
  10418. return val;
  10419. }
  10420. u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
  10421. {
  10422. struct cntr_entry *entry;
  10423. u64 *sval;
  10424. entry = &dev_cntrs[index];
  10425. sval = dd->scntrs + entry->offset;
  10426. if (vl != CNTR_INVALID_VL)
  10427. sval += vl;
  10428. return read_dev_port_cntr(dd, entry, sval, dd, vl);
  10429. }
  10430. u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
  10431. {
  10432. struct cntr_entry *entry;
  10433. u64 *sval;
  10434. entry = &dev_cntrs[index];
  10435. sval = dd->scntrs + entry->offset;
  10436. if (vl != CNTR_INVALID_VL)
  10437. sval += vl;
  10438. return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
  10439. }
  10440. u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
  10441. {
  10442. struct cntr_entry *entry;
  10443. u64 *sval;
  10444. entry = &port_cntrs[index];
  10445. sval = ppd->scntrs + entry->offset;
  10446. if (vl != CNTR_INVALID_VL)
  10447. sval += vl;
  10448. if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
  10449. (index <= C_RCV_HDR_OVF_LAST)) {
  10450. /* We do not want to bother for disabled contexts */
  10451. return 0;
  10452. }
  10453. return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
  10454. }
  10455. u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
  10456. {
  10457. struct cntr_entry *entry;
  10458. u64 *sval;
  10459. entry = &port_cntrs[index];
  10460. sval = ppd->scntrs + entry->offset;
  10461. if (vl != CNTR_INVALID_VL)
  10462. sval += vl;
  10463. if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
  10464. (index <= C_RCV_HDR_OVF_LAST)) {
  10465. /* We do not want to bother for disabled contexts */
  10466. return 0;
  10467. }
  10468. return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
  10469. }
  10470. static void update_synth_timer(unsigned long opaque)
  10471. {
  10472. u64 cur_tx;
  10473. u64 cur_rx;
  10474. u64 total_flits;
  10475. u8 update = 0;
  10476. int i, j, vl;
  10477. struct hfi1_pportdata *ppd;
  10478. struct cntr_entry *entry;
  10479. struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
  10480. /*
10481. * Rather than keep beating on the CSRs, pick a minimal set that we can
10482. * check to watch for a potential rollover. We can do this by looking at
10483. * the number of flits sent/received. If the total flits exceed 32 bits
10484. * then we have to iterate over all the counters and update.
  10485. */
  10486. entry = &dev_cntrs[C_DC_RCV_FLITS];
  10487. cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
  10488. entry = &dev_cntrs[C_DC_XMIT_FLITS];
  10489. cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
  10490. hfi1_cdbg(
  10491. CNTR,
  10492. "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
  10493. dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
  10494. if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
  10495. /*
  10496. * May not be strictly necessary to update but it won't hurt and
  10497. * simplifies the logic here.
  10498. */
  10499. update = 1;
  10500. hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
  10501. dd->unit);
  10502. } else {
  10503. total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
  10504. hfi1_cdbg(CNTR,
  10505. "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
  10506. total_flits, (u64)CNTR_32BIT_MAX);
  10507. if (total_flits >= CNTR_32BIT_MAX) {
  10508. hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
  10509. dd->unit);
  10510. update = 1;
  10511. }
  10512. }
  10513. if (update) {
  10514. hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
  10515. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10516. entry = &dev_cntrs[i];
  10517. if (entry->flags & CNTR_VL) {
  10518. for (vl = 0; vl < C_VL_COUNT; vl++)
  10519. read_dev_cntr(dd, i, vl);
  10520. } else {
  10521. read_dev_cntr(dd, i, CNTR_INVALID_VL);
  10522. }
  10523. }
  10524. ppd = (struct hfi1_pportdata *)(dd + 1);
  10525. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10526. for (j = 0; j < PORT_CNTR_LAST; j++) {
  10527. entry = &port_cntrs[j];
  10528. if (entry->flags & CNTR_VL) {
  10529. for (vl = 0; vl < C_VL_COUNT; vl++)
  10530. read_port_cntr(ppd, j, vl);
  10531. } else {
  10532. read_port_cntr(ppd, j, CNTR_INVALID_VL);
  10533. }
  10534. }
  10535. }
  10536. /*
10537. * We want the value in the register. The goal is to keep track
10538. * of the number of "ticks", not the counter value. In other
10539. * words, if the register rolls we want to notice it and go ahead
10540. * and force an update.
  10541. */
  10542. entry = &dev_cntrs[C_DC_XMIT_FLITS];
  10543. dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
  10544. CNTR_MODE_R, 0);
  10545. entry = &dev_cntrs[C_DC_RCV_FLITS];
  10546. dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
  10547. CNTR_MODE_R, 0);
  10548. hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
  10549. dd->unit, dd->last_tx, dd->last_rx);
  10550. } else {
  10551. hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
  10552. }
  10553. mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
  10554. }
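/*
 * A worked example of the tripwire above (numbers illustrative, and
 * assuming CNTR_32BIT_MAX is the 32-bit maximum): if last_tx was
 * 0x10000000 and cur_tx is now 0xF0000000 with rx unchanged, the delta
 * is 0xE0000000, under the 32-bit limit, so no full update is needed.
 * If instead cur_tx had rolled back below last_tx, or the combined
 * tx + rx delta reached the 32-bit limit, every dev and port counter
 * would be re-read to capture the rollover.
 */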
10555. #define C_MAX_NAME 13 /* 12 chars + one for \0 */
  10556. static int init_cntrs(struct hfi1_devdata *dd)
  10557. {
  10558. int i, rcv_ctxts, j;
  10559. size_t sz;
  10560. char *p;
  10561. char name[C_MAX_NAME];
  10562. struct hfi1_pportdata *ppd;
  10563. const char *bit_type_32 = ",32";
  10564. const int bit_type_32_sz = strlen(bit_type_32);
  10565. /* set up the stats timer; the add_timer is done at the end */
  10566. setup_timer(&dd->synth_stats_timer, update_synth_timer,
  10567. (unsigned long)dd);
  10568. /***********************/
  10569. /* per device counters */
  10570. /***********************/
10571. /* size names and determine how many we have */
  10572. dd->ndevcntrs = 0;
  10573. sz = 0;
  10574. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10575. if (dev_cntrs[i].flags & CNTR_DISABLED) {
  10576. hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
  10577. continue;
  10578. }
  10579. if (dev_cntrs[i].flags & CNTR_VL) {
  10580. dev_cntrs[i].offset = dd->ndevcntrs;
  10581. for (j = 0; j < C_VL_COUNT; j++) {
  10582. snprintf(name, C_MAX_NAME, "%s%d",
  10583. dev_cntrs[i].name, vl_from_idx(j));
  10584. sz += strlen(name);
  10585. /* Add ",32" for 32-bit counters */
  10586. if (dev_cntrs[i].flags & CNTR_32BIT)
  10587. sz += bit_type_32_sz;
  10588. sz++;
  10589. dd->ndevcntrs++;
  10590. }
  10591. } else if (dev_cntrs[i].flags & CNTR_SDMA) {
  10592. dev_cntrs[i].offset = dd->ndevcntrs;
  10593. for (j = 0; j < dd->chip_sdma_engines; j++) {
  10594. snprintf(name, C_MAX_NAME, "%s%d",
  10595. dev_cntrs[i].name, j);
  10596. sz += strlen(name);
  10597. /* Add ",32" for 32-bit counters */
  10598. if (dev_cntrs[i].flags & CNTR_32BIT)
  10599. sz += bit_type_32_sz;
  10600. sz++;
  10601. dd->ndevcntrs++;
  10602. }
  10603. } else {
  10604. /* +1 for newline. */
  10605. sz += strlen(dev_cntrs[i].name) + 1;
  10606. /* Add ",32" for 32-bit counters */
  10607. if (dev_cntrs[i].flags & CNTR_32BIT)
  10608. sz += bit_type_32_sz;
  10609. dev_cntrs[i].offset = dd->ndevcntrs;
  10610. dd->ndevcntrs++;
  10611. }
  10612. }
  10613. /* allocate space for the counter values */
  10614. dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
  10615. if (!dd->cntrs)
  10616. goto bail;
  10617. dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
  10618. if (!dd->scntrs)
  10619. goto bail;
  10620. /* allocate space for the counter names */
  10621. dd->cntrnameslen = sz;
  10622. dd->cntrnames = kmalloc(sz, GFP_KERNEL);
  10623. if (!dd->cntrnames)
  10624. goto bail;
  10625. /* fill in the names */
  10626. for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
  10627. if (dev_cntrs[i].flags & CNTR_DISABLED) {
  10628. /* Nothing */
  10629. } else if (dev_cntrs[i].flags & CNTR_VL) {
  10630. for (j = 0; j < C_VL_COUNT; j++) {
  10631. snprintf(name, C_MAX_NAME, "%s%d",
  10632. dev_cntrs[i].name,
  10633. vl_from_idx(j));
  10634. memcpy(p, name, strlen(name));
  10635. p += strlen(name);
  10636. /* Counter is 32 bits */
  10637. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10638. memcpy(p, bit_type_32, bit_type_32_sz);
  10639. p += bit_type_32_sz;
  10640. }
  10641. *p++ = '\n';
  10642. }
  10643. } else if (dev_cntrs[i].flags & CNTR_SDMA) {
  10644. for (j = 0; j < dd->chip_sdma_engines; j++) {
  10645. snprintf(name, C_MAX_NAME, "%s%d",
  10646. dev_cntrs[i].name, j);
  10647. memcpy(p, name, strlen(name));
  10648. p += strlen(name);
  10649. /* Counter is 32 bits */
  10650. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10651. memcpy(p, bit_type_32, bit_type_32_sz);
  10652. p += bit_type_32_sz;
  10653. }
  10654. *p++ = '\n';
  10655. }
  10656. } else {
  10657. memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
  10658. p += strlen(dev_cntrs[i].name);
  10659. /* Counter is 32 bits */
  10660. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10661. memcpy(p, bit_type_32, bit_type_32_sz);
  10662. p += bit_type_32_sz;
  10663. }
  10664. *p++ = '\n';
  10665. }
  10666. }
  10667. /*********************/
  10668. /* per port counters */
  10669. /*********************/
  10670. /*
10671. * Go through the receive header overflow counters and disable the ones
10672. * we don't need. This varies based on the platform, so we need to do it
10673. * dynamically here.
  10674. */
  10675. rcv_ctxts = dd->num_rcv_contexts;
  10676. for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
  10677. i <= C_RCV_HDR_OVF_LAST; i++) {
  10678. port_cntrs[i].flags |= CNTR_DISABLED;
  10679. }
10680. /* size port counter names and determine how many we have */
  10681. sz = 0;
  10682. dd->nportcntrs = 0;
  10683. for (i = 0; i < PORT_CNTR_LAST; i++) {
  10684. if (port_cntrs[i].flags & CNTR_DISABLED) {
  10685. hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
  10686. continue;
  10687. }
  10688. if (port_cntrs[i].flags & CNTR_VL) {
  10689. port_cntrs[i].offset = dd->nportcntrs;
  10690. for (j = 0; j < C_VL_COUNT; j++) {
  10691. snprintf(name, C_MAX_NAME, "%s%d",
  10692. port_cntrs[i].name, vl_from_idx(j));
  10693. sz += strlen(name);
  10694. /* Add ",32" for 32-bit counters */
  10695. if (port_cntrs[i].flags & CNTR_32BIT)
  10696. sz += bit_type_32_sz;
  10697. sz++;
  10698. dd->nportcntrs++;
  10699. }
  10700. } else {
  10701. /* +1 for newline */
  10702. sz += strlen(port_cntrs[i].name) + 1;
  10703. /* Add ",32" for 32-bit counters */
  10704. if (port_cntrs[i].flags & CNTR_32BIT)
  10705. sz += bit_type_32_sz;
  10706. port_cntrs[i].offset = dd->nportcntrs;
  10707. dd->nportcntrs++;
  10708. }
  10709. }
  10710. /* allocate space for the counter names */
  10711. dd->portcntrnameslen = sz;
  10712. dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
  10713. if (!dd->portcntrnames)
  10714. goto bail;
  10715. /* fill in port cntr names */
  10716. for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
  10717. if (port_cntrs[i].flags & CNTR_DISABLED)
  10718. continue;
  10719. if (port_cntrs[i].flags & CNTR_VL) {
  10720. for (j = 0; j < C_VL_COUNT; j++) {
  10721. snprintf(name, C_MAX_NAME, "%s%d",
  10722. port_cntrs[i].name, vl_from_idx(j));
  10723. memcpy(p, name, strlen(name));
  10724. p += strlen(name);
  10725. /* Counter is 32 bits */
  10726. if (port_cntrs[i].flags & CNTR_32BIT) {
  10727. memcpy(p, bit_type_32, bit_type_32_sz);
  10728. p += bit_type_32_sz;
  10729. }
  10730. *p++ = '\n';
  10731. }
  10732. } else {
  10733. memcpy(p, port_cntrs[i].name,
  10734. strlen(port_cntrs[i].name));
  10735. p += strlen(port_cntrs[i].name);
  10736. /* Counter is 32 bits */
  10737. if (port_cntrs[i].flags & CNTR_32BIT) {
  10738. memcpy(p, bit_type_32, bit_type_32_sz);
  10739. p += bit_type_32_sz;
  10740. }
  10741. *p++ = '\n';
  10742. }
  10743. }
  10744. /* allocate per port storage for counter values */
  10745. ppd = (struct hfi1_pportdata *)(dd + 1);
  10746. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10747. ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
  10748. if (!ppd->cntrs)
  10749. goto bail;
  10750. ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
  10751. if (!ppd->scntrs)
  10752. goto bail;
  10753. }
  10754. /* CPU counters need to be allocated and zeroed */
  10755. if (init_cpu_counters(dd))
  10756. goto bail;
  10757. mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
  10758. return 0;
  10759. bail:
  10760. free_cntrs(dd);
  10761. return -ENOMEM;
  10762. }
  10763. static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
  10764. {
  10765. switch (chip_lstate) {
  10766. default:
  10767. dd_dev_err(dd,
  10768. "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
  10769. chip_lstate);
  10770. /* fall through */
  10771. case LSTATE_DOWN:
  10772. return IB_PORT_DOWN;
  10773. case LSTATE_INIT:
  10774. return IB_PORT_INIT;
  10775. case LSTATE_ARMED:
  10776. return IB_PORT_ARMED;
  10777. case LSTATE_ACTIVE:
  10778. return IB_PORT_ACTIVE;
  10779. }
  10780. }
  10781. u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
  10782. {
  10783. /* look at the HFI meta-states only */
  10784. switch (chip_pstate & 0xf0) {
  10785. default:
  10786. dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
  10787. chip_pstate);
  10788. /* fall through */
  10789. case PLS_DISABLED:
  10790. return IB_PORTPHYSSTATE_DISABLED;
  10791. case PLS_OFFLINE:
  10792. return OPA_PORTPHYSSTATE_OFFLINE;
  10793. case PLS_POLLING:
  10794. return IB_PORTPHYSSTATE_POLLING;
  10795. case PLS_CONFIGPHY:
  10796. return IB_PORTPHYSSTATE_TRAINING;
  10797. case PLS_LINKUP:
  10798. return IB_PORTPHYSSTATE_LINKUP;
  10799. case PLS_PHYTEST:
  10800. return IB_PORTPHYSSTATE_PHY_TEST;
  10801. }
  10802. }
  10803. /* return the OPA port logical state name */
  10804. const char *opa_lstate_name(u32 lstate)
  10805. {
  10806. static const char * const port_logical_names[] = {
  10807. "PORT_NOP",
  10808. "PORT_DOWN",
  10809. "PORT_INIT",
  10810. "PORT_ARMED",
  10811. "PORT_ACTIVE",
  10812. "PORT_ACTIVE_DEFER",
  10813. };
  10814. if (lstate < ARRAY_SIZE(port_logical_names))
  10815. return port_logical_names[lstate];
  10816. return "unknown";
  10817. }
  10818. /* return the OPA port physical state name */
  10819. const char *opa_pstate_name(u32 pstate)
  10820. {
  10821. static const char * const port_physical_names[] = {
  10822. "PHYS_NOP",
  10823. "reserved1",
  10824. "PHYS_POLL",
  10825. "PHYS_DISABLED",
  10826. "PHYS_TRAINING",
  10827. "PHYS_LINKUP",
  10828. "PHYS_LINK_ERR_RECOVER",
  10829. "PHYS_PHY_TEST",
  10830. "reserved8",
  10831. "PHYS_OFFLINE",
  10832. "PHYS_GANGED",
  10833. "PHYS_TEST",
  10834. };
  10835. if (pstate < ARRAY_SIZE(port_physical_names))
  10836. return port_physical_names[pstate];
  10837. return "unknown";
  10838. }
  10839. /*
  10840. * Read the hardware link state and set the driver's cached value of it.
  10841. * Return the (new) current value.
  10842. */
  10843. u32 get_logical_state(struct hfi1_pportdata *ppd)
  10844. {
  10845. u32 new_state;
  10846. new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
  10847. if (new_state != ppd->lstate) {
  10848. dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
  10849. opa_lstate_name(new_state), new_state);
  10850. ppd->lstate = new_state;
  10851. }
  10852. /*
  10853. * Set port status flags in the page mapped into userspace
  10854. * memory. Do it here to ensure a reliable state - this is
  10855. * the only function called by all state handling code.
  10856. * Always set the flags due to the fact that the cache value
  10857. * might have been changed explicitly outside of this
  10858. * function.
  10859. */
  10860. if (ppd->statusp) {
  10861. switch (ppd->lstate) {
  10862. case IB_PORT_DOWN:
  10863. case IB_PORT_INIT:
  10864. *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
  10865. HFI1_STATUS_IB_READY);
  10866. break;
  10867. case IB_PORT_ARMED:
  10868. *ppd->statusp |= HFI1_STATUS_IB_CONF;
  10869. break;
  10870. case IB_PORT_ACTIVE:
  10871. *ppd->statusp |= HFI1_STATUS_IB_READY;
  10872. break;
  10873. }
  10874. }
  10875. return ppd->lstate;
  10876. }
  10877. /**
  10878. * wait_logical_linkstate - wait for an IB link state change to occur
  10879. * @ppd: port device
  10880. * @state: the state to wait for
  10881. * @msecs: the number of milliseconds to wait
  10882. *
  10883. * Wait up to msecs milliseconds for IB link state change to occur.
  10884. * For now, take the easy polling route.
  10885. * Returns 0 if state reached, otherwise -ETIMEDOUT.
  10886. */
  10887. static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
  10888. int msecs)
  10889. {
  10890. unsigned long timeout;
  10891. timeout = jiffies + msecs_to_jiffies(msecs);
  10892. while (1) {
  10893. if (get_logical_state(ppd) == state)
  10894. return 0;
  10895. if (time_after(jiffies, timeout))
  10896. break;
  10897. msleep(20);
  10898. }
  10899. dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
  10900. return -ETIMEDOUT;
  10901. }
  10902. u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
  10903. {
  10904. u32 pstate;
  10905. u32 ib_pstate;
  10906. pstate = read_physical_state(ppd->dd);
  10907. ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
  10908. if (ppd->last_pstate != ib_pstate) {
  10909. dd_dev_info(ppd->dd,
  10910. "%s: physical state changed to %s (0x%x), phy 0x%x\n",
  10911. __func__, opa_pstate_name(ib_pstate), ib_pstate,
  10912. pstate);
  10913. ppd->last_pstate = ib_pstate;
  10914. }
  10915. return ib_pstate;
  10916. }
  10917. #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
  10918. (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
  10919. #define SET_STATIC_RATE_CONTROL_SMASK(r) \
  10920. (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
  10921. int hfi1_init_ctxt(struct send_context *sc)
  10922. {
  10923. if (sc) {
  10924. struct hfi1_devdata *dd = sc->dd;
  10925. u64 reg;
  10926. u8 set = (sc->type == SC_USER ?
  10927. HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
  10928. HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
  10929. reg = read_kctxt_csr(dd, sc->hw_context,
  10930. SEND_CTXT_CHECK_ENABLE);
  10931. if (set)
  10932. CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
  10933. else
  10934. SET_STATIC_RATE_CONTROL_SMASK(reg);
  10935. write_kctxt_csr(dd, sc->hw_context,
  10936. SEND_CTXT_CHECK_ENABLE, reg);
  10937. }
  10938. return 0;
  10939. }
  10940. int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
  10941. {
  10942. int ret = 0;
  10943. u64 reg;
  10944. if (dd->icode != ICODE_RTL_SILICON) {
  10945. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  10946. dd_dev_info(dd, "%s: tempsense not supported by HW\n",
  10947. __func__);
  10948. return -EINVAL;
  10949. }
  10950. reg = read_csr(dd, ASIC_STS_THERM);
  10951. temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
  10952. ASIC_STS_THERM_CURR_TEMP_MASK);
  10953. temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
  10954. ASIC_STS_THERM_LO_TEMP_MASK);
  10955. temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
  10956. ASIC_STS_THERM_HI_TEMP_MASK);
  10957. temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
  10958. ASIC_STS_THERM_CRIT_TEMP_MASK);
  10959. /* triggers is a 3-bit value - 1 bit per trigger. */
  10960. temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
  10961. return ret;
  10962. }
  10963. /* ========================================================================= */
  10964. /*
  10965. * Enable/disable chip from delivering interrupts.
  10966. */
  10967. void set_intr_state(struct hfi1_devdata *dd, u32 enable)
  10968. {
  10969. int i;
  10970. /*
  10971. * In HFI, the mask needs to be 1 to allow interrupts.
  10972. */
  10973. if (enable) {
  10974. /* enable all interrupts */
  10975. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  10976. write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
  10977. init_qsfp_int(dd);
  10978. } else {
  10979. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  10980. write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
  10981. }
  10982. }
  10983. /*
  10984. * Clear all interrupt sources on the chip.
  10985. */
  10986. static void clear_all_interrupts(struct hfi1_devdata *dd)
  10987. {
  10988. int i;
  10989. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  10990. write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
  10991. write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
  10992. write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
  10993. write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
  10994. write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
  10995. write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
  10996. write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
  10997. write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
  10998. for (i = 0; i < dd->chip_send_contexts; i++)
  10999. write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
  11000. for (i = 0; i < dd->chip_sdma_engines; i++)
  11001. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
  11002. write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
  11003. write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
  11004. write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
  11005. }
  11006. /* Move to pcie.c? */
  11007. static void disable_intx(struct pci_dev *pdev)
  11008. {
  11009. pci_intx(pdev, 0);
  11010. }
  11011. static void clean_up_interrupts(struct hfi1_devdata *dd)
  11012. {
  11013. int i;
  11014. /* remove irqs - must happen before disabling/turning off */
  11015. if (dd->num_msix_entries) {
  11016. /* MSI-X */
  11017. struct hfi1_msix_entry *me = dd->msix_entries;
  11018. for (i = 0; i < dd->num_msix_entries; i++, me++) {
  11019. if (!me->arg) /* => no irq, no affinity */
  11020. continue;
  11021. hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
  11022. free_irq(me->msix.vector, me->arg);
  11023. }
  11024. } else {
  11025. /* INTx */
  11026. if (dd->requested_intx_irq) {
  11027. free_irq(dd->pcidev->irq, dd);
  11028. dd->requested_intx_irq = 0;
  11029. }
  11030. }
  11031. /* turn off interrupts */
  11032. if (dd->num_msix_entries) {
  11033. /* MSI-X */
  11034. pci_disable_msix(dd->pcidev);
  11035. } else {
  11036. /* INTx */
  11037. disable_intx(dd->pcidev);
  11038. }
  11039. /* clean structures */
  11040. kfree(dd->msix_entries);
  11041. dd->msix_entries = NULL;
  11042. dd->num_msix_entries = 0;
  11043. }
  11044. /*
  11045. * Remap the interrupt source from the general handler to the given MSI-X
  11046. * interrupt.
  11047. */
  11048. static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
  11049. {
  11050. u64 reg;
  11051. int m, n;
  11052. /* clear from the handled mask of the general interrupt */
  11053. m = isrc / 64;
  11054. n = isrc % 64;
  11055. dd->gi_mask[m] &= ~((u64)1 << n);
  11056. /* direct the chip source to the given MSI-X interrupt */
  11057. m = isrc / 8;
  11058. n = isrc % 8;
  11059. reg = read_csr(dd, CCE_INT_MAP + (8 * m));
  11060. reg &= ~((u64)0xff << (8 * n));
  11061. reg |= ((u64)msix_intr & 0xff) << (8 * n);
  11062. write_csr(dd, CCE_INT_MAP + (8 * m), reg);
  11063. }
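/*
 * Example of the index math above (source number invented for
 * illustration): for isrc = 70, the general-handler bit cleared is
 * bit 6 of gi_mask[1] (70 / 64 = 1, 70 % 64 = 6), and the MSI-X vector
 * is written into byte 6 of CCE_INT_MAP register 8 (70 / 8 = 8,
 * 70 % 8 = 6).
 */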
  11064. static void remap_sdma_interrupts(struct hfi1_devdata *dd,
  11065. int engine, int msix_intr)
  11066. {
  11067. /*
11068. * SDMA engine interrupt sources are grouped by type, rather than by
11069. * engine. Per-engine interrupts are as follows:
  11070. * SDMA
  11071. * SDMAProgress
  11072. * SDMAIdle
  11073. */
  11074. remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
  11075. msix_intr);
  11076. remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
  11077. msix_intr);
  11078. remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
  11079. msix_intr);
  11080. }
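/*
 * For instance (engine number illustrative only): for engine e, the
 * three chip sources remapped above are IS_SDMA_START + e (SDMA),
 * IS_SDMA_START + TXE_NUM_SDMA_ENGINES + e (SDMAProgress), and
 * IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + e (SDMAIdle), all pointed
 * at the same msix_intr.
 */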
  11081. static int request_intx_irq(struct hfi1_devdata *dd)
  11082. {
  11083. int ret;
  11084. snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
  11085. dd->unit);
  11086. ret = request_irq(dd->pcidev->irq, general_interrupt,
  11087. IRQF_SHARED, dd->intx_name, dd);
  11088. if (ret)
  11089. dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
  11090. ret);
  11091. else
  11092. dd->requested_intx_irq = 1;
  11093. return ret;
  11094. }
  11095. static int request_msix_irqs(struct hfi1_devdata *dd)
  11096. {
  11097. int first_general, last_general;
  11098. int first_sdma, last_sdma;
  11099. int first_rx, last_rx;
  11100. int i, ret = 0;
  11101. /* calculate the ranges we are going to use */
  11102. first_general = 0;
  11103. last_general = first_general + 1;
  11104. first_sdma = last_general;
  11105. last_sdma = first_sdma + dd->num_sdma;
  11106. first_rx = last_sdma;
  11107. last_rx = first_rx + dd->n_krcv_queues;
  11108. /*
  11109. * Sanity check - the code expects all SDMA chip source
  11110. * interrupts to be in the same CSR, starting at bit 0. Verify
  11111. * that this is true by checking the bit location of the start.
  11112. */
  11113. BUILD_BUG_ON(IS_SDMA_START % 64);
  11114. for (i = 0; i < dd->num_msix_entries; i++) {
  11115. struct hfi1_msix_entry *me = &dd->msix_entries[i];
  11116. const char *err_info;
  11117. irq_handler_t handler;
  11118. irq_handler_t thread = NULL;
  11119. void *arg;
  11120. int idx;
  11121. struct hfi1_ctxtdata *rcd = NULL;
  11122. struct sdma_engine *sde = NULL;
  11123. /* obtain the arguments to request_irq */
  11124. if (first_general <= i && i < last_general) {
  11125. idx = i - first_general;
  11126. handler = general_interrupt;
  11127. arg = dd;
  11128. snprintf(me->name, sizeof(me->name),
  11129. DRIVER_NAME "_%d", dd->unit);
  11130. err_info = "general";
  11131. me->type = IRQ_GENERAL;
  11132. } else if (first_sdma <= i && i < last_sdma) {
  11133. idx = i - first_sdma;
  11134. sde = &dd->per_sdma[idx];
  11135. handler = sdma_interrupt;
  11136. arg = sde;
  11137. snprintf(me->name, sizeof(me->name),
  11138. DRIVER_NAME "_%d sdma%d", dd->unit, idx);
  11139. err_info = "sdma";
  11140. remap_sdma_interrupts(dd, idx, i);
  11141. me->type = IRQ_SDMA;
  11142. } else if (first_rx <= i && i < last_rx) {
  11143. idx = i - first_rx;
  11144. rcd = dd->rcd[idx];
  11145. /* no interrupt if no rcd */
  11146. if (!rcd)
  11147. continue;
  11148. /*
  11149. * Set the interrupt register and mask for this
  11150. * context's interrupt.
  11151. */
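/*
 * Example (illustrative context number): kernel context 2 uses chip
 * interrupt source IS_RCVAVAIL_START + 2, so ireg =
 * (IS_RCVAVAIL_START + 2) / 64 and imask is the single bit at
 * position (IS_RCVAVAIL_START + 2) % 64 within that register.
 */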
  11152. rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
  11153. rcd->imask = ((u64)1) <<
  11154. ((IS_RCVAVAIL_START + idx) % 64);
  11155. handler = receive_context_interrupt;
  11156. thread = receive_context_thread;
  11157. arg = rcd;
  11158. snprintf(me->name, sizeof(me->name),
  11159. DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
  11160. err_info = "receive context";
  11161. remap_intr(dd, IS_RCVAVAIL_START + idx, i);
  11162. me->type = IRQ_RCVCTXT;
  11163. } else {
  11164. /* not in our expected range - complain, then
  11165. * ignore it
  11166. */
  11167. dd_dev_err(dd,
  11168. "Unexpected extra MSI-X interrupt %d\n", i);
  11169. continue;
  11170. }
  11171. /* no argument, no interrupt */
  11172. if (!arg)
  11173. continue;
  11174. /* make sure the name is terminated */
  11175. me->name[sizeof(me->name) - 1] = 0;
  11176. ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
  11177. me->name, arg);
  11178. if (ret) {
  11179. dd_dev_err(dd,
  11180. "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
  11181. err_info, me->msix.vector, idx, ret);
  11182. return ret;
  11183. }
  11184. /*
  11185. * assign arg after request_irq call, so it will be
  11186. * cleaned up
  11187. */
  11188. me->arg = arg;
  11189. ret = hfi1_get_irq_affinity(dd, me);
  11190. if (ret)
  11191. dd_dev_err(dd,
11192. "unable to pin IRQ, err %d\n", ret);
  11193. }
  11194. return ret;
  11195. }
  11196. /*
  11197. * Set the general handler to accept all interrupts, remap all
  11198. * chip interrupts back to MSI-X 0.
  11199. */
  11200. static void reset_interrupts(struct hfi1_devdata *dd)
  11201. {
  11202. int i;
  11203. /* all interrupts handled by the general handler */
  11204. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11205. dd->gi_mask[i] = ~(u64)0;
  11206. /* all chip interrupts map to MSI-X 0 */
  11207. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11208. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11209. }
  11210. static int set_up_interrupts(struct hfi1_devdata *dd)
  11211. {
  11212. struct hfi1_msix_entry *entries;
  11213. u32 total, request;
  11214. int i, ret;
  11215. int single_interrupt = 0; /* we expect to have all the interrupts */
  11216. /*
  11217. * Interrupt count:
  11218. * 1 general, "slow path" interrupt (includes the SDMA engines
  11219. * slow source, SDMACleanupDone)
  11220. * N interrupts - one per used SDMA engine
11221. * M interrupts - one per kernel receive context
  11222. */
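/*
 * Example (illustrative counts): with 16 SDMA engines in use and 9
 * kernel receive contexts, total = 1 + 16 + 9 = 26 MSI-X vectors are
 * requested.
 */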
  11223. total = 1 + dd->num_sdma + dd->n_krcv_queues;
  11224. entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
  11225. if (!entries) {
  11226. ret = -ENOMEM;
  11227. goto fail;
  11228. }
  11229. /* 1-1 MSI-X entry assignment */
  11230. for (i = 0; i < total; i++)
  11231. entries[i].msix.entry = i;
  11232. /* ask for MSI-X interrupts */
  11233. request = total;
  11234. request_msix(dd, &request, entries);
  11235. if (request == 0) {
  11236. /* using INTx */
  11237. /* dd->num_msix_entries already zero */
  11238. kfree(entries);
  11239. single_interrupt = 1;
  11240. dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
  11241. } else {
  11242. /* using MSI-X */
  11243. dd->num_msix_entries = request;
  11244. dd->msix_entries = entries;
  11245. if (request != total) {
  11246. /* using MSI-X, with reduced interrupts */
  11247. dd_dev_err(
  11248. dd,
  11249. "cannot handle reduced interrupt case, want %u, got %u\n",
  11250. total, request);
  11251. ret = -EINVAL;
  11252. goto fail;
  11253. }
  11254. dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
  11255. }
  11256. /* mask all interrupts */
  11257. set_intr_state(dd, 0);
  11258. /* clear all pending interrupts */
  11259. clear_all_interrupts(dd);
  11260. /* reset general handler mask, chip MSI-X mappings */
  11261. reset_interrupts(dd);
  11262. if (single_interrupt)
  11263. ret = request_intx_irq(dd);
  11264. else
  11265. ret = request_msix_irqs(dd);
  11266. if (ret)
  11267. goto fail;
  11268. return 0;
  11269. fail:
  11270. clean_up_interrupts(dd);
  11271. return ret;
  11272. }
  11273. /*
  11274. * Set up context values in dd. Sets:
  11275. *
  11276. * num_rcv_contexts - number of contexts being used
  11277. * n_krcv_queues - number of kernel contexts
  11278. * first_user_ctxt - first non-kernel context in array of contexts
  11279. * freectxts - number of free user contexts
  11280. * num_send_contexts - number of PIO send contexts being used
  11281. */
  11282. static int set_up_context_variables(struct hfi1_devdata *dd)
  11283. {
  11284. int num_kernel_contexts;
  11285. int total_contexts;
  11286. int ret;
  11287. unsigned ngroups;
  11288. int qos_rmt_count;
  11289. int user_rmt_reduced;
  11290. /*
  11291. * Kernel receive contexts:
  11292. * - Context 0 - control context (VL15/multicast/error)
  11293. * - Context 1 - first kernel context
  11294. * - Context 2 - second kernel context
  11295. * ...
  11296. */
  11297. if (n_krcvqs)
  11298. /*
  11299. * n_krcvqs is the sum of module parameter kernel receive
  11300. * contexts, krcvqs[]. It does not include the control
  11301. * context, so add that.
  11302. */
  11303. num_kernel_contexts = n_krcvqs + 1;
  11304. else
  11305. num_kernel_contexts = DEFAULT_KRCVQS + 1;
  11306. /*
  11307. * Every kernel receive context needs an ACK send context.
11308. * One send context is allocated for each VL{0-7} and VL15.
  11309. */
  11310. if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
  11311. dd_dev_err(dd,
  11312. "Reducing # kernel rcv contexts to: %d, from %d\n",
  11313. (int)(dd->chip_send_contexts - num_vls - 1),
  11314. (int)num_kernel_contexts);
  11315. num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
  11316. }
  11317. /*
  11318. * User contexts:
  11319. * - default to 1 user context per real (non-HT) CPU core if
  11320. * num_user_contexts is negative
  11321. */
  11322. if (num_user_contexts < 0)
  11323. num_user_contexts =
  11324. cpumask_weight(&node_affinity.real_cpu_mask);
  11325. total_contexts = num_kernel_contexts + num_user_contexts;
  11326. /*
  11327. * Adjust the counts given a global max.
  11328. */
  11329. if (total_contexts > dd->chip_rcv_contexts) {
  11330. dd_dev_err(dd,
  11331. "Reducing # user receive contexts to: %d, from %d\n",
  11332. (int)(dd->chip_rcv_contexts - num_kernel_contexts),
  11333. (int)num_user_contexts);
  11334. num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
  11335. /* recalculate */
  11336. total_contexts = num_kernel_contexts + num_user_contexts;
  11337. }
  11338. /* each user context requires an entry in the RMT */
  11339. qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
  11340. if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
  11341. user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
  11342. dd_dev_err(dd,
  11343. "RMT size is reducing the number of user receive contexts from %d to %d\n",
  11344. (int)num_user_contexts,
  11345. user_rmt_reduced);
  11346. /* recalculate */
  11347. num_user_contexts = user_rmt_reduced;
  11348. total_contexts = num_kernel_contexts + num_user_contexts;
  11349. }
  11350. /* the first N are kernel contexts, the rest are user contexts */
  11351. dd->num_rcv_contexts = total_contexts;
  11352. dd->n_krcv_queues = num_kernel_contexts;
  11353. dd->first_user_ctxt = num_kernel_contexts;
  11354. dd->num_user_contexts = num_user_contexts;
  11355. dd->freectxts = num_user_contexts;
  11356. dd_dev_info(dd,
  11357. "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
  11358. (int)dd->chip_rcv_contexts,
  11359. (int)dd->num_rcv_contexts,
  11360. (int)dd->n_krcv_queues,
  11361. (int)dd->num_rcv_contexts - dd->n_krcv_queues);
  11362. /*
  11363. * Receive array allocation:
  11364. * All RcvArray entries are divided into groups of 8. This
  11365. * is required by the hardware and will speed up writes to
  11366. * consecutive entries by using write-combining of the entire
  11367. * cacheline.
  11368. *
11369. * The groups are divided evenly among all contexts;
11370. * any leftover groups are given to the first N user
  11371. * contexts.
  11372. */
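/*
 * Worked example (illustrative sizes): with 1024 RcvArray entries, a
 * group size of 8 and 10 receive contexts, ngroups = 1024 / 8 = 128,
 * each context gets 128 / 10 = 12 groups, and the remaining
 * 128 - 10 * 12 = 8 groups (nctxt_extra) go to the first user contexts.
 */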
  11373. dd->rcv_entries.group_size = RCV_INCREMENT;
  11374. ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
  11375. dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
  11376. dd->rcv_entries.nctxt_extra = ngroups -
  11377. (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
  11378. dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
  11379. dd->rcv_entries.ngroups,
  11380. dd->rcv_entries.nctxt_extra);
  11381. if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
  11382. MAX_EAGER_ENTRIES * 2) {
  11383. dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
  11384. dd->rcv_entries.group_size;
  11385. dd_dev_info(dd,
  11386. "RcvArray group count too high, change to %u\n",
  11387. dd->rcv_entries.ngroups);
  11388. dd->rcv_entries.nctxt_extra = 0;
  11389. }
  11390. /*
  11391. * PIO send contexts
  11392. */
  11393. ret = init_sc_pools_and_sizes(dd);
  11394. if (ret >= 0) { /* success */
  11395. dd->num_send_contexts = ret;
  11396. dd_dev_info(
  11397. dd,
  11398. "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
  11399. dd->chip_send_contexts,
  11400. dd->num_send_contexts,
  11401. dd->sc_sizes[SC_KERNEL].count,
  11402. dd->sc_sizes[SC_ACK].count,
  11403. dd->sc_sizes[SC_USER].count,
  11404. dd->sc_sizes[SC_VL15].count);
  11405. ret = 0; /* success */
  11406. }
  11407. return ret;
  11408. }
  11409. /*
  11410. * Set the device/port partition key table. The MAD code
  11411. * will ensure that, at least, the partial management
  11412. * partition key is present in the table.
  11413. */
  11414. static void set_partition_keys(struct hfi1_pportdata *ppd)
  11415. {
  11416. struct hfi1_devdata *dd = ppd->dd;
  11417. u64 reg = 0;
  11418. int i;
  11419. dd_dev_info(dd, "Setting partition keys\n");
  11420. for (i = 0; i < hfi1_get_npkeys(dd); i++) {
  11421. reg |= (ppd->pkeys[i] &
  11422. RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
  11423. ((i % 4) *
  11424. RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
  11425. /* Each register holds 4 PKey values. */
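/*
 * e.g. pkeys[0..3] are packed into the first RcvPartitionKey register
 * (written when i == 3, at offset (3 - 3) * 2 = 0) and pkeys[4..7]
 * into the next one, at offset (7 - 3) * 2 = 8.
 */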
  11426. if ((i % 4) == 3) {
  11427. write_csr(dd, RCV_PARTITION_KEY +
  11428. ((i - 3) * 2), reg);
  11429. reg = 0;
  11430. }
  11431. }
  11432. /* Always enable HW pkeys check when pkeys table is set */
  11433. add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
  11434. }
  11435. /*
  11436. * These CSRs and memories are uninitialized on reset and must be
  11437. * written before reading to set the ECC/parity bits.
  11438. *
11439. * NOTE: All user context CSRs that are not mmapped write-only
  11440. * (e.g. the TID flows) must be initialized even if the driver never
  11441. * reads them.
  11442. */
  11443. static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
  11444. {
  11445. int i, j;
  11446. /* CceIntMap */
  11447. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11448. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11449. /* SendCtxtCreditReturnAddr */
  11450. for (i = 0; i < dd->chip_send_contexts; i++)
  11451. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
  11452. /* PIO Send buffers */
  11453. /* SDMA Send buffers */
  11454. /*
  11455. * These are not normally read, and (presently) have no method
  11456. * to be read, so are not pre-initialized
  11457. */
  11458. /* RcvHdrAddr */
  11459. /* RcvHdrTailAddr */
  11460. /* RcvTidFlowTable */
  11461. for (i = 0; i < dd->chip_rcv_contexts; i++) {
  11462. write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
  11463. write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
  11464. for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
  11465. write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
  11466. }
  11467. /* RcvArray */
  11468. for (i = 0; i < dd->chip_rcv_array_count; i++)
  11469. write_csr(dd, RCV_ARRAY + (8 * i),
  11470. RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
  11471. /* RcvQPMapTable */
  11472. for (i = 0; i < 32; i++)
  11473. write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
  11474. }
  11475. /*
  11476. * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
  11477. */
  11478. static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
  11479. u64 ctrl_bits)
  11480. {
  11481. unsigned long timeout;
  11482. u64 reg;
  11483. /* is the condition present? */
  11484. reg = read_csr(dd, CCE_STATUS);
  11485. if ((reg & status_bits) == 0)
  11486. return;
  11487. /* clear the condition */
  11488. write_csr(dd, CCE_CTRL, ctrl_bits);
  11489. /* wait for the condition to clear */
  11490. timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
  11491. while (1) {
  11492. reg = read_csr(dd, CCE_STATUS);
  11493. if ((reg & status_bits) == 0)
  11494. return;
  11495. if (time_after(jiffies, timeout)) {
  11496. dd_dev_err(dd,
  11497. "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
  11498. status_bits, reg & status_bits);
  11499. return;
  11500. }
  11501. udelay(1);
  11502. }
  11503. }
  11504. /* set CCE CSRs to chip reset defaults */
  11505. static void reset_cce_csrs(struct hfi1_devdata *dd)
  11506. {
  11507. int i;
  11508. /* CCE_REVISION read-only */
  11509. /* CCE_REVISION2 read-only */
  11510. /* CCE_CTRL - bits clear automatically */
  11511. /* CCE_STATUS read-only, use CceCtrl to clear */
  11512. clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
  11513. clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
  11514. clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
  11515. for (i = 0; i < CCE_NUM_SCRATCH; i++)
  11516. write_csr(dd, CCE_SCRATCH + (8 * i), 0);
  11517. /* CCE_ERR_STATUS read-only */
  11518. write_csr(dd, CCE_ERR_MASK, 0);
  11519. write_csr(dd, CCE_ERR_CLEAR, ~0ull);
  11520. /* CCE_ERR_FORCE leave alone */
  11521. for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
  11522. write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
  11523. write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
  11524. /* CCE_PCIE_CTRL leave alone */
  11525. for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
  11526. write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
  11527. write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
  11528. CCE_MSIX_TABLE_UPPER_RESETCSR);
  11529. }
  11530. for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
  11531. /* CCE_MSIX_PBA read-only */
  11532. write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
  11533. write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
  11534. }
  11535. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
11536. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11537. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  11538. /* CCE_INT_STATUS read-only */
  11539. write_csr(dd, CCE_INT_MASK + (8 * i), 0);
  11540. write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
  11541. /* CCE_INT_FORCE leave alone */
  11542. /* CCE_INT_BLOCKED read-only */
  11543. }
  11544. for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
  11545. write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
  11546. }
  11547. /* set MISC CSRs to chip reset defaults */
  11548. static void reset_misc_csrs(struct hfi1_devdata *dd)
  11549. {
  11550. int i;
  11551. for (i = 0; i < 32; i++) {
  11552. write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
  11553. write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
  11554. write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
  11555. }
  11556. /*
  11557. * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
11558. * only be written in 128-byte chunks
  11559. */
  11560. /* init RSA engine to clear lingering errors */
  11561. write_csr(dd, MISC_CFG_RSA_CMD, 1);
  11562. write_csr(dd, MISC_CFG_RSA_MU, 0);
  11563. write_csr(dd, MISC_CFG_FW_CTRL, 0);
  11564. /* MISC_STS_8051_DIGEST read-only */
  11565. /* MISC_STS_SBM_DIGEST read-only */
  11566. /* MISC_STS_PCIE_DIGEST read-only */
  11567. /* MISC_STS_FAB_DIGEST read-only */
  11568. /* MISC_ERR_STATUS read-only */
  11569. write_csr(dd, MISC_ERR_MASK, 0);
  11570. write_csr(dd, MISC_ERR_CLEAR, ~0ull);
  11571. /* MISC_ERR_FORCE leave alone */
  11572. }
  11573. /* set TXE CSRs to chip reset defaults */
  11574. static void reset_txe_csrs(struct hfi1_devdata *dd)
  11575. {
  11576. int i;
  11577. /*
  11578. * TXE Kernel CSRs
  11579. */
  11580. write_csr(dd, SEND_CTRL, 0);
  11581. __cm_reset(dd, 0); /* reset CM internal state */
  11582. /* SEND_CONTEXTS read-only */
  11583. /* SEND_DMA_ENGINES read-only */
  11584. /* SEND_PIO_MEM_SIZE read-only */
  11585. /* SEND_DMA_MEM_SIZE read-only */
  11586. write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
  11587. pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
  11588. /* SEND_PIO_ERR_STATUS read-only */
  11589. write_csr(dd, SEND_PIO_ERR_MASK, 0);
  11590. write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
  11591. /* SEND_PIO_ERR_FORCE leave alone */
  11592. /* SEND_DMA_ERR_STATUS read-only */
  11593. write_csr(dd, SEND_DMA_ERR_MASK, 0);
  11594. write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
  11595. /* SEND_DMA_ERR_FORCE leave alone */
  11596. /* SEND_EGRESS_ERR_STATUS read-only */
  11597. write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
  11598. write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
  11599. /* SEND_EGRESS_ERR_FORCE leave alone */
  11600. write_csr(dd, SEND_BTH_QP, 0);
  11601. write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
  11602. write_csr(dd, SEND_SC2VLT0, 0);
  11603. write_csr(dd, SEND_SC2VLT1, 0);
  11604. write_csr(dd, SEND_SC2VLT2, 0);
  11605. write_csr(dd, SEND_SC2VLT3, 0);
  11606. write_csr(dd, SEND_LEN_CHECK0, 0);
  11607. write_csr(dd, SEND_LEN_CHECK1, 0);
  11608. /* SEND_ERR_STATUS read-only */
  11609. write_csr(dd, SEND_ERR_MASK, 0);
  11610. write_csr(dd, SEND_ERR_CLEAR, ~0ull);
  11611. /* SEND_ERR_FORCE read-only */
  11612. for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
  11613. write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
  11614. for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
  11615. write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
  11616. for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
  11617. write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
  11618. for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
  11619. write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
  11620. for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
  11621. write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
  11622. write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
  11623. write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
  11624. /* SEND_CM_CREDIT_USED_STATUS read-only */
  11625. write_csr(dd, SEND_CM_TIMER_CTRL, 0);
  11626. write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
  11627. write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
  11628. write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
  11629. write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
  11630. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  11631. write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
  11632. write_csr(dd, SEND_CM_CREDIT_VL15, 0);
  11633. /* SEND_CM_CREDIT_USED_VL read-only */
  11634. /* SEND_CM_CREDIT_USED_VL15 read-only */
  11635. /* SEND_EGRESS_CTXT_STATUS read-only */
  11636. /* SEND_EGRESS_SEND_DMA_STATUS read-only */
  11637. write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
  11638. /* SEND_EGRESS_ERR_INFO read-only */
  11639. /* SEND_EGRESS_ERR_SOURCE read-only */
  11640. /*
  11641. * TXE Per-Context CSRs
  11642. */
  11643. for (i = 0; i < dd->chip_send_contexts; i++) {
  11644. write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
  11645. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
  11646. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
  11647. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
  11648. write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
  11649. write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
  11650. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
  11651. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
  11652. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
  11653. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
  11654. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
  11655. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
  11656. }
  11657. /*
  11658. * TXE Per-SDMA CSRs
  11659. */
  11660. for (i = 0; i < dd->chip_sdma_engines; i++) {
  11661. write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
  11662. /* SEND_DMA_STATUS read-only */
  11663. write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
  11664. write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
  11665. write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
  11666. /* SEND_DMA_HEAD read-only */
  11667. write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
  11668. write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
  11669. /* SEND_DMA_IDLE_CNT read-only */
  11670. write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
  11671. write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
  11672. /* SEND_DMA_DESC_FETCHED_CNT read-only */
  11673. /* SEND_DMA_ENG_ERR_STATUS read-only */
  11674. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
  11675. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
  11676. /* SEND_DMA_ENG_ERR_FORCE leave alone */
  11677. write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
  11678. write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
  11679. write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
  11680. write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
  11681. write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
  11682. write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
  11683. write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
  11684. }
  11685. }
  11686. /*
  11687. * Expect on entry:
  11688. * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
  11689. */
  11690. static void init_rbufs(struct hfi1_devdata *dd)
  11691. {
  11692. u64 reg;
  11693. int count;
  11694. /*
  11695. * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
  11696. * clear.
  11697. */
  11698. count = 0;
  11699. while (1) {
  11700. reg = read_csr(dd, RCV_STATUS);
  11701. if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
  11702. | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
  11703. break;
  11704. /*
  11705. * Give up after 1ms - maximum wait time.
  11706. *
  11707. * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
  11708. * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
  11709. * 148 KB / (66% * 250MB/s) = 920us
  11710. */
  11711. if (count++ > 500) {
  11712. dd_dev_err(dd,
  11713. "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
  11714. __func__, reg);
  11715. break;
  11716. }
  11717. udelay(2); /* do not busy-wait the CSR */
  11718. }
  11719. /* start the init - expect RcvCtrl to be 0 */
  11720. write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
  11721. /*
11722. * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
  11723. * period after the write before RcvStatus.RxRbufInitDone is valid.
  11724. * The delay in the first run through the loop below is sufficient and
11725. * required before the first read of RcvStatus.RxRbufInitDone.
  11726. */
  11727. read_csr(dd, RCV_CTRL);
  11728. /* wait for the init to finish */
  11729. count = 0;
  11730. while (1) {
  11731. /* delay is required first time through - see above */
  11732. udelay(2); /* do not busy-wait the CSR */
  11733. reg = read_csr(dd, RCV_STATUS);
  11734. if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
  11735. break;
  11736. /* give up after 100us - slowest possible at 33MHz is 73us */
  11737. if (count++ > 50) {
  11738. dd_dev_err(dd,
  11739. "%s: RcvStatus.RxRbufInit not set, continuing\n",
  11740. __func__);
  11741. break;
  11742. }
  11743. }
  11744. }
  11745. /* set RXE CSRs to chip reset defaults */
  11746. static void reset_rxe_csrs(struct hfi1_devdata *dd)
  11747. {
  11748. int i, j;
  11749. /*
  11750. * RXE Kernel CSRs
  11751. */
  11752. write_csr(dd, RCV_CTRL, 0);
  11753. init_rbufs(dd);
  11754. /* RCV_STATUS read-only */
  11755. /* RCV_CONTEXTS read-only */
  11756. /* RCV_ARRAY_CNT read-only */
  11757. /* RCV_BUF_SIZE read-only */
  11758. write_csr(dd, RCV_BTH_QP, 0);
  11759. write_csr(dd, RCV_MULTICAST, 0);
  11760. write_csr(dd, RCV_BYPASS, 0);
  11761. write_csr(dd, RCV_VL15, 0);
  11762. /* this is a clear-down */
  11763. write_csr(dd, RCV_ERR_INFO,
  11764. RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
  11765. /* RCV_ERR_STATUS read-only */
  11766. write_csr(dd, RCV_ERR_MASK, 0);
  11767. write_csr(dd, RCV_ERR_CLEAR, ~0ull);
  11768. /* RCV_ERR_FORCE leave alone */
  11769. for (i = 0; i < 32; i++)
  11770. write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
  11771. for (i = 0; i < 4; i++)
  11772. write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
  11773. for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
  11774. write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
  11775. for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
  11776. write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
  11777. for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
  11778. write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
  11779. write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
  11780. write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
  11781. }
  11782. for (i = 0; i < 32; i++)
  11783. write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
  11784. /*
  11785. * RXE Kernel and User Per-Context CSRs
  11786. */
  11787. for (i = 0; i < dd->chip_rcv_contexts; i++) {
  11788. /* kernel */
  11789. write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
  11790. /* RCV_CTXT_STATUS read-only */
  11791. write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
  11792. write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
  11793. write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
  11794. write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
  11795. write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
  11796. write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
  11797. write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
  11798. write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
  11799. write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
  11800. write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
  11801. /* user */
  11802. /* RCV_HDR_TAIL read-only */
  11803. write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
  11804. /* RCV_EGR_INDEX_TAIL read-only */
  11805. write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
  11806. /* RCV_EGR_OFFSET_TAIL read-only */
  11807. for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
  11808. write_uctxt_csr(dd, i,
  11809. RCV_TID_FLOW_TABLE + (8 * j), 0);
  11810. }
  11811. }
  11812. }
  11813. /*
  11814. * Set sc2vl tables.
  11815. *
  11816. * They power on to zeros, so to avoid send context errors
  11817. * they need to be set:
  11818. *
  11819. * SC 0-7 -> VL 0-7 (respectively)
  11820. * SC 15 -> VL 15
  11821. * otherwise
  11822. * -> VL 0
  11823. */
  11824. static void init_sc2vl_tables(struct hfi1_devdata *dd)
  11825. {
  11826. int i;
  11827. /* init per architecture spec, constrained by hardware capability */
  11828. /* HFI maps sent packets */
  11829. write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
  11830. 0,
  11831. 0, 0, 1, 1,
  11832. 2, 2, 3, 3,
  11833. 4, 4, 5, 5,
  11834. 6, 6, 7, 7));
  11835. write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
  11836. 1,
  11837. 8, 0, 9, 0,
  11838. 10, 0, 11, 0,
  11839. 12, 0, 13, 0,
  11840. 14, 0, 15, 15));
  11841. write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
  11842. 2,
  11843. 16, 0, 17, 0,
  11844. 18, 0, 19, 0,
  11845. 20, 0, 21, 0,
  11846. 22, 0, 23, 0));
  11847. write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
  11848. 3,
  11849. 24, 0, 25, 0,
  11850. 26, 0, 27, 0,
  11851. 28, 0, 29, 0,
  11852. 30, 0, 31, 0));
  11853. /* DC maps received packets */
  11854. write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
  11855. 15_0,
  11856. 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
  11857. 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
  11858. write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
  11859. 31_16,
  11860. 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
  11861. 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
  11862. /* initialize the cached sc2vl values consistently with h/w */
  11863. for (i = 0; i < 32; i++) {
  11864. if (i < 8 || i == 15)
  11865. *((u8 *)(dd->sc2vl) + i) = (u8)i;
  11866. else
  11867. *((u8 *)(dd->sc2vl) + i) = 0;
  11868. }
  11869. }
  11870. /*
  11871. * Read chip sizes and then reset parts to sane, disabled, values. We cannot
  11872. * depend on the chip going through a power-on reset - a driver may be loaded
  11873. * and unloaded many times.
  11874. *
  11875. * Do not write any CSR values to the chip in this routine - there may be
  11876. * a reset following the (possible) FLR in this routine.
  11877. *
  11878. */
  11879. static void init_chip(struct hfi1_devdata *dd)
  11880. {
  11881. int i;
  11882. /*
  11883. * Put the HFI CSRs in a known state.
  11884. * Combine this with a DC reset.
  11885. *
  11886. * Stop the device from doing anything while we do a
  11887. * reset. We know there are no other active users of
  11888. * the device since we are now in charge. Turn off
11889. * all outbound and inbound traffic and make sure
  11890. * the device does not generate any interrupts.
  11891. */
  11892. /* disable send contexts and SDMA engines */
  11893. write_csr(dd, SEND_CTRL, 0);
  11894. for (i = 0; i < dd->chip_send_contexts; i++)
  11895. write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
  11896. for (i = 0; i < dd->chip_sdma_engines; i++)
  11897. write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
  11898. /* disable port (turn off RXE inbound traffic) and contexts */
  11899. write_csr(dd, RCV_CTRL, 0);
  11900. for (i = 0; i < dd->chip_rcv_contexts; i++)
11901. write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
  11902. /* mask all interrupt sources */
  11903. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11904. write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
  11905. /*
  11906. * DC Reset: do a full DC reset before the register clear.
  11907. * A recommended length of time to hold is one CSR read,
  11908. * so reread the CceDcCtrl. Then, hold the DC in reset
  11909. * across the clear.
  11910. */
  11911. write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
  11912. (void)read_csr(dd, CCE_DC_CTRL);
  11913. if (use_flr) {
  11914. /*
  11915. * A FLR will reset the SPC core and part of the PCIe.
  11916. * The parts that need to be restored have already been
  11917. * saved.
  11918. */
  11919. dd_dev_info(dd, "Resetting CSRs with FLR\n");
  11920. /* do the FLR, the DC reset will remain */
  11921. hfi1_pcie_flr(dd);
  11922. /* restore command and BARs */
  11923. restore_pci_variables(dd);
  11924. if (is_ax(dd)) {
  11925. dd_dev_info(dd, "Resetting CSRs with FLR\n");
  11926. hfi1_pcie_flr(dd);
  11927. restore_pci_variables(dd);
  11928. }
  11929. } else {
  11930. dd_dev_info(dd, "Resetting CSRs with writes\n");
  11931. reset_cce_csrs(dd);
  11932. reset_txe_csrs(dd);
  11933. reset_rxe_csrs(dd);
  11934. reset_misc_csrs(dd);
  11935. }
  11936. /* clear the DC reset */
  11937. write_csr(dd, CCE_DC_CTRL, 0);
  11938. /* Set the LED off */
  11939. setextled(dd, 0);
  11940. /*
  11941. * Clear the QSFP reset.
  11942. * An FLR enforces a 0 on all out pins. The driver does not touch
11943. * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and holds
11944. * anything plugged in constantly in reset, if it pays attention
  11945. * to RESET_N.
  11946. * Prime examples of this are optical cables. Set all pins high.
  11947. * I2CCLK and I2CDAT will change per direction, and INT_N and
  11948. * MODPRS_N are input only and their value is ignored.
  11949. */
  11950. write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
  11951. write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
  11952. init_chip_resources(dd);
  11953. }
  11954. static void init_early_variables(struct hfi1_devdata *dd)
  11955. {
  11956. int i;
  11957. /* assign link credit variables */
  11958. dd->vau = CM_VAU;
  11959. dd->link_credits = CM_GLOBAL_CREDITS;
  11960. if (is_ax(dd))
  11961. dd->link_credits--;
  11962. dd->vcu = cu_to_vcu(hfi1_cu);
  11963. /* enough room for 8 MAD packets plus header - 17K */
  11964. dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
  11965. if (dd->vl15_init > dd->link_credits)
  11966. dd->vl15_init = dd->link_credits;
  11967. write_uninitialized_csrs_and_memories(dd);
  11968. if (HFI1_CAP_IS_KSET(PKEY_CHECK))
  11969. for (i = 0; i < dd->num_pports; i++) {
  11970. struct hfi1_pportdata *ppd = &dd->pport[i];
  11971. set_partition_keys(ppd);
  11972. }
  11973. init_sc2vl_tables(dd);
  11974. }
  11975. static void init_kdeth_qp(struct hfi1_devdata *dd)
  11976. {
  11977. /* user changed the KDETH_QP */
  11978. if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
  11979. /* out of range or illegal value */
  11980. dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
  11981. kdeth_qp = 0;
  11982. }
  11983. if (kdeth_qp == 0) /* not set, or failed range check */
  11984. kdeth_qp = DEFAULT_KDETH_QP;
  11985. write_csr(dd, SEND_BTH_QP,
  11986. (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
  11987. SEND_BTH_QP_KDETH_QP_SHIFT);
  11988. write_csr(dd, RCV_BTH_QP,
  11989. (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
  11990. RCV_BTH_QP_KDETH_QP_SHIFT);
  11991. }
  11992. /**
  11993. * init_qpmap_table
  11994. * @dd - device data
  11995. * @first_ctxt - first context
11996. * @last_ctxt - last context
  11997. *
11998. * This routine sets the qpn mapping table that
  11999. * is indexed by qpn[8:1].
  12000. *
  12001. * The routine will round robin the 256 settings
  12002. * from first_ctxt to last_ctxt.
  12003. *
  12004. * The first/last looks ahead to having specialized
  12005. * receive contexts for mgmt and bypass. Normal
12006. * verbs traffic is assumed to be on a range
  12007. * of receive contexts.
  12008. */
  12009. static void init_qpmap_table(struct hfi1_devdata *dd,
  12010. u32 first_ctxt,
  12011. u32 last_ctxt)
  12012. {
  12013. u64 reg = 0;
  12014. u64 regno = RCV_QP_MAP_TABLE;
  12015. int i;
  12016. u64 ctxt = first_ctxt;
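/*
 * Example (illustrative contexts): with first_ctxt = 1 and
 * last_ctxt = 3, the entries for qpn[8:1] = 0, 1, 2, 3, ... map to
 * contexts 1, 2, 3, 1, ..., packed 8 one-byte entries per 64-bit
 * RcvQPMapTable register.
 */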
  12017. for (i = 0; i < 256; i++) {
  12018. reg |= ctxt << (8 * (i % 8));
  12019. ctxt++;
  12020. if (ctxt > last_ctxt)
  12021. ctxt = first_ctxt;
  12022. if (i % 8 == 7) {
  12023. write_csr(dd, regno, reg);
  12024. reg = 0;
  12025. regno += 8;
  12026. }
  12027. }
  12028. add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
  12029. | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
  12030. }
  12031. struct rsm_map_table {
  12032. u64 map[NUM_MAP_REGS];
  12033. unsigned int used;
  12034. };
  12035. struct rsm_rule_data {
  12036. u8 offset;
  12037. u8 pkt_type;
  12038. u32 field1_off;
  12039. u32 field2_off;
  12040. u32 index1_off;
  12041. u32 index1_width;
  12042. u32 index2_off;
  12043. u32 index2_width;
  12044. u32 mask1;
  12045. u32 value1;
  12046. u32 mask2;
  12047. u32 value2;
  12048. };
  12049. /*
  12050. * Return an initialized RMT map table for users to fill in. OK if it
  12051. * returns NULL, indicating no table.
  12052. */
  12053. static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
  12054. {
  12055. struct rsm_map_table *rmt;
  12056. u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
  12057. rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
  12058. if (rmt) {
  12059. memset(rmt->map, rxcontext, sizeof(rmt->map));
  12060. rmt->used = 0;
  12061. }
  12062. return rmt;
  12063. }
  12064. /*
  12065. * Write the final RMT map table to the chip and free the table. OK if
  12066. * table is NULL.
  12067. */
  12068. static void complete_rsm_map_table(struct hfi1_devdata *dd,
  12069. struct rsm_map_table *rmt)
  12070. {
  12071. int i;
  12072. if (rmt) {
  12073. /* write table to chip */
  12074. for (i = 0; i < NUM_MAP_REGS; i++)
  12075. write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
  12076. /* enable RSM */
  12077. add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
  12078. }
  12079. }
  12080. /*
  12081. * Add a receive side mapping rule.
  12082. */
  12083. static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
  12084. struct rsm_rule_data *rrd)
  12085. {
  12086. write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
  12087. (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
  12088. 1ull << rule_index | /* enable bit */
  12089. (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
  12090. write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
  12091. (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
  12092. (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
  12093. (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
  12094. (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
  12095. (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
  12096. (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
  12097. write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
  12098. (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
  12099. (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
  12100. (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
  12101. (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
  12102. }
  12103. /* return the number of RSM map table entries that will be used for QOS */
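/*
 * Worked example (illustrative module parameters): if the largest
 * per-VL krcvqs[] value is 4 and num_vls is 8, then m = 2 qpn bits
 * and n = 3 vl bits, giving 1 << (2 + 3) = 32 RSM map table entries.
 */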
  12104. static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
  12105. unsigned int *np)
  12106. {
  12107. int i;
  12108. unsigned int m, n;
  12109. u8 max_by_vl = 0;
  12110. /* is QOS active at all? */
  12111. if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
  12112. num_vls == 1 ||
  12113. krcvqsset <= 1)
  12114. goto no_qos;
  12115. /* determine bits for qpn */
  12116. for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
  12117. if (krcvqs[i] > max_by_vl)
  12118. max_by_vl = krcvqs[i];
  12119. if (max_by_vl > 32)
  12120. goto no_qos;
  12121. m = ilog2(__roundup_pow_of_two(max_by_vl));
  12122. /* determine bits for vl */
  12123. n = ilog2(__roundup_pow_of_two(num_vls));
  12124. /* reject if too much is used */
  12125. if ((m + n) > 7)
  12126. goto no_qos;
  12127. if (mp)
  12128. *mp = m;
  12129. if (np)
  12130. *np = n;
  12131. return 1 << (m + n);
  12132. no_qos:
  12133. if (mp)
  12134. *mp = 0;
  12135. if (np)
  12136. *np = 0;
  12137. return 0;
  12138. }
  12139. /**
  12140. * init_qos - init RX qos
  12141. * @dd - device data
  12142. * @rmt - RSM map table
  12143. *
  12144. * This routine initializes Rule 0 and the RSM map table to implement
  12145. * quality of service (qos).
  12146. *
  12147. * If all of the limit tests succeed, qos is applied based on the array
  12148. * interpretation of krcvqs where entry 0 is VL0.
  12149. *
  12150. * The number of vl bits (n) and the number of qpn bits (m) are computed to
  12151. * feed both the RSM map table and the single rule.
  12152. */
  12153. static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
  12154. {
  12155. struct rsm_rule_data rrd;
  12156. unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
  12157. unsigned int rmt_entries;
  12158. u64 reg;
  12159. if (!rmt)
  12160. goto bail;
  12161. rmt_entries = qos_rmt_entries(dd, &m, &n);
  12162. if (rmt_entries == 0)
  12163. goto bail;
  12164. qpns_per_vl = 1 << m;
  12165. /* enough room in the map table? */
  12166. rmt_entries = 1 << (m + n);
  12167. if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
  12168. goto bail;
12169. /* add qos entries to the RSM map table */
  12170. for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
  12171. unsigned tctxt;
  12172. for (qpn = 0, tctxt = ctxt;
  12173. krcvqs[i] && qpn < qpns_per_vl; qpn++) {
  12174. unsigned idx, regoff, regidx;
  12175. /* generate the index the hardware will produce */
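/*
 * e.g. (illustrative values) with n = 3 vl bits, qpn = 2 and vl
 * i = 1: idx = rmt->used + ((2 << 3) ^ 1) = rmt->used + 17.
 */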
  12176. idx = rmt->used + ((qpn << n) ^ i);
  12177. regoff = (idx % 8) * 8;
  12178. regidx = idx / 8;
  12179. /* replace default with context number */
  12180. reg = rmt->map[regidx];
  12181. reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
  12182. << regoff);
  12183. reg |= (u64)(tctxt++) << regoff;
  12184. rmt->map[regidx] = reg;
  12185. if (tctxt == ctxt + krcvqs[i])
  12186. tctxt = ctxt;
  12187. }
  12188. ctxt += krcvqs[i];
  12189. }
  12190. rrd.offset = rmt->used;
  12191. rrd.pkt_type = 2;
  12192. rrd.field1_off = LRH_BTH_MATCH_OFFSET;
  12193. rrd.field2_off = LRH_SC_MATCH_OFFSET;
  12194. rrd.index1_off = LRH_SC_SELECT_OFFSET;
  12195. rrd.index1_width = n;
  12196. rrd.index2_off = QPN_SELECT_OFFSET;
  12197. rrd.index2_width = m + n;
  12198. rrd.mask1 = LRH_BTH_MASK;
  12199. rrd.value1 = LRH_BTH_VALUE;
  12200. rrd.mask2 = LRH_SC_MASK;
  12201. rrd.value2 = LRH_SC_VALUE;
  12202. /* add rule 0 */
  12203. add_rsm_rule(dd, 0, &rrd);
  12204. /* mark RSM map entries as used */
  12205. rmt->used += rmt_entries;
  12206. /* map everything else to the mcast/err/vl15 context */
  12207. init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
  12208. dd->qos_shift = n + 1;
  12209. return;
  12210. bail:
  12211. dd->qos_shift = 1;
  12212. init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
  12213. }
  12214. static void init_user_fecn_handling(struct hfi1_devdata *dd,
  12215. struct rsm_map_table *rmt)
  12216. {
  12217. struct rsm_rule_data rrd;
  12218. u64 reg;
  12219. int i, idx, regoff, regidx;
  12220. u8 offset;
  12221. /* there needs to be enough room in the map table */
  12222. if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
  12223. dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
  12224. return;
  12225. }
  12226. /*
  12227. * RSM will extract the destination context as an index into the
  12228. * map table. The destination contexts are a sequential block
  12229. * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
  12230. * Map entries are accessed as offset + extracted value. Adjust
  12231. * the added offset so this sequence can be placed anywhere in
  12232. * the table - as long as the entries themselves do not wrap.
  12233. * There are only enough bits in offset for the table size, so
  12234. * start with that to allow for a "negative" offset.
  12235. */
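/*
 * Worked example (illustrative values, assuming a 256-entry table):
 * with rmt->used = 32 and first_user_ctxt = 16,
 * offset = (u8)(256 + 32 - 16) = 16, so extracted context 16 maps to
 * entry 16 + 16 = 32, the first entry claimed below.
 */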
  12236. offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
  12237. (int)dd->first_user_ctxt);
  12238. for (i = dd->first_user_ctxt, idx = rmt->used;
  12239. i < dd->num_rcv_contexts; i++, idx++) {
  12240. /* replace with identity mapping */
  12241. regoff = (idx % 8) * 8;
  12242. regidx = idx / 8;
  12243. reg = rmt->map[regidx];
  12244. reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
  12245. reg |= (u64)i << regoff;
  12246. rmt->map[regidx] = reg;
  12247. }
  12248. /*
  12249. * For RSM intercept of Expected FECN packets:
  12250. * o packet type 0 - expected
  12251. * o match on F (bit 95), using select/match 1, and
  12252. * o match on SH (bit 133), using select/match 2.
  12253. *
  12254. * Use index 1 to extract the 8-bit receive context from DestQP
  12255. * (start at bit 64). Use that as the RSM map table index.
  12256. */
  12257. rrd.offset = offset;
  12258. rrd.pkt_type = 0;
  12259. rrd.field1_off = 95;
  12260. rrd.field2_off = 133;
  12261. rrd.index1_off = 64;
  12262. rrd.index1_width = 8;
  12263. rrd.index2_off = 0;
  12264. rrd.index2_width = 0;
  12265. rrd.mask1 = 1;
  12266. rrd.value1 = 1;
  12267. rrd.mask2 = 1;
  12268. rrd.value2 = 1;
  12269. /* add rule 1 */
  12270. add_rsm_rule(dd, 1, &rrd);
  12271. rmt->used += dd->num_user_contexts;
  12272. }
  12273. static void init_rxe(struct hfi1_devdata *dd)
  12274. {
  12275. struct rsm_map_table *rmt;
  12276. /* enable all receive errors */
  12277. write_csr(dd, RCV_ERR_MASK, ~0ull);
  12278. rmt = alloc_rsm_map_table(dd);
  12279. /* set up QOS, including the QPN map table */
  12280. init_qos(dd, rmt);
  12281. init_user_fecn_handling(dd, rmt);
  12282. complete_rsm_map_table(dd, rmt);
  12283. kfree(rmt);
  12284. /*
  12285. * make sure RcvCtrl.RcvWcb <= PCIe Device Control
  12286. * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
  12287. * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
  12288. * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
  12289. * Max_PayLoad_Size set to its minimum of 128.
  12290. *
  12291. * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
  12292. * (64 bytes). Max_Payload_Size is possibly modified upward in
  12293. * tune_pcie_caps() which is called after this routine.
  12294. */
  12295. }
  12296. static void init_other(struct hfi1_devdata *dd)
  12297. {
  12298. /* enable all CCE errors */
  12299. write_csr(dd, CCE_ERR_MASK, ~0ull);
  12300. /* enable *some* Misc errors */
  12301. write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
  12302. /* enable all DC errors, except LCB */
  12303. write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
  12304. write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
  12305. }
  12306. /*
12307. * Fill out the given AU table using the given CU. A CU is defined in terms of
12308. * AUs. The table is an encoding: given the index, how many AUs does that
  12309. * represent?
  12310. *
  12311. * NOTE: Assumes that the register layout is the same for the
  12312. * local and remote tables.
  12313. */
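/*
 * e.g. with CU = 1 (illustrative), the table encodes 0, 1, 2, 4, 8,
 * 16, 32 and 64 AUs for indices 0 through 7.
 */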
  12314. static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
  12315. u32 csr0to3, u32 csr4to7)
  12316. {
  12317. write_csr(dd, csr0to3,
  12318. 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
  12319. 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
  12320. 2ull * cu <<
  12321. SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
  12322. 4ull * cu <<
  12323. SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
  12324. write_csr(dd, csr4to7,
  12325. 8ull * cu <<
  12326. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
  12327. 16ull * cu <<
  12328. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
  12329. 32ull * cu <<
  12330. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
  12331. 64ull * cu <<
  12332. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
  12333. }
  12334. static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
  12335. {
  12336. assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
  12337. SEND_CM_LOCAL_AU_TABLE4_TO7);
  12338. }
  12339. void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
  12340. {
  12341. assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
  12342. SEND_CM_REMOTE_AU_TABLE4_TO7);
  12343. }
  12344. static void init_txe(struct hfi1_devdata *dd)
  12345. {
  12346. int i;
  12347. /* enable all PIO, SDMA, general, and Egress errors */
  12348. write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
  12349. write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
  12350. write_csr(dd, SEND_ERR_MASK, ~0ull);
  12351. write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
  12352. /* enable all per-context and per-SDMA engine errors */
  12353. for (i = 0; i < dd->chip_send_contexts; i++)
  12354. write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
  12355. for (i = 0; i < dd->chip_sdma_engines; i++)
  12356. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
  12357. /* set the local CU to AU mapping */
  12358. assign_local_cm_au_table(dd, dd->vcu);
  12359. /*
  12360. * Set reasonable default for Credit Return Timer
  12361. * Don't set on Simulator - causes it to choke.
  12362. */
  12363. if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
  12364. write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
  12365. }
  12366. int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
  12367. {
  12368. struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
  12369. unsigned sctxt;
  12370. int ret = 0;
  12371. u64 reg;
  12372. if (!rcd || !rcd->sc) {
  12373. ret = -EINVAL;
  12374. goto done;
  12375. }
  12376. sctxt = rcd->sc->hw_context;
  12377. reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
  12378. ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
  12379. SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
  12380. /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
  12381. if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
  12382. reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
  12383. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
  12384. /*
  12385. * Enable send-side J_KEY integrity check, unless this is A0 h/w
  12386. */
  12387. if (!is_ax(dd)) {
  12388. reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
  12389. reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
  12390. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
  12391. }
  12392. /* Enable J_KEY check on receive context. */
  12393. reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
  12394. ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
  12395. RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
  12396. write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
  12397. done:
  12398. return ret;
  12399. }
  12400. int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
  12401. {
  12402. struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
  12403. unsigned sctxt;
  12404. int ret = 0;
  12405. u64 reg;
  12406. if (!rcd || !rcd->sc) {
  12407. ret = -EINVAL;
  12408. goto done;
  12409. }
  12410. sctxt = rcd->sc->hw_context;
  12411. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
  12412. /*
  12413. * Disable send-side J_KEY integrity check, unless this is A0 h/w.
  12414. * This check would not have been enabled for A0 h/w, see
  12415. * set_ctxt_jkey().
  12416. */
  12417. if (!is_ax(dd)) {
  12418. reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
  12419. reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
  12420. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
  12421. }
  12422. /* Turn off the J_KEY on the receive side */
  12423. write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
  12424. done:
  12425. return ret;
  12426. }
  12427. int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
  12428. {
  12429. struct hfi1_ctxtdata *rcd;
  12430. unsigned sctxt;
  12431. int ret = 0;
  12432. u64 reg;
  12433. if (ctxt < dd->num_rcv_contexts) {
  12434. rcd = dd->rcd[ctxt];
  12435. } else {
  12436. ret = -EINVAL;
  12437. goto done;
  12438. }
  12439. if (!rcd || !rcd->sc) {
  12440. ret = -EINVAL;
  12441. goto done;
  12442. }
  12443. sctxt = rcd->sc->hw_context;
  12444. reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
  12445. SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
  12446. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
  12447. reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
  12448. reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
  12449. reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
  12450. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
  12451. done:
  12452. return ret;
  12453. }
  12454. int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
  12455. {
  12456. struct hfi1_ctxtdata *rcd;
  12457. unsigned sctxt;
  12458. int ret = 0;
  12459. u64 reg;
  12460. if (ctxt < dd->num_rcv_contexts) {
  12461. rcd = dd->rcd[ctxt];
  12462. } else {
  12463. ret = -EINVAL;
  12464. goto done;
  12465. }
  12466. if (!rcd || !rcd->sc) {
  12467. ret = -EINVAL;
  12468. goto done;
  12469. }
  12470. sctxt = rcd->sc->hw_context;
  12471. reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
  12472. reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
  12473. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
  12474. write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
  12475. done:
  12476. return ret;
  12477. }
  12478. /*
12479. * Start doing the clean up of the chip. Our clean up happens in multiple
  12480. * stages and this is just the first.
  12481. */
  12482. void hfi1_start_cleanup(struct hfi1_devdata *dd)
  12483. {
  12484. aspm_exit(dd);
  12485. free_cntrs(dd);
  12486. free_rcverr(dd);
  12487. clean_up_interrupts(dd);
  12488. finish_chip_resources(dd);
  12489. }
  12490. #define HFI_BASE_GUID(dev) \
  12491. ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
  12492. /*
  12493. * Information can be shared between the two HFIs on the same ASIC
  12494. * in the same OS. This function finds the peer device and sets
  12495. * up a shared structure.
  12496. */
  12497. static int init_asic_data(struct hfi1_devdata *dd)
  12498. {
  12499. unsigned long flags;
  12500. struct hfi1_devdata *tmp, *peer = NULL;
  12501. struct hfi1_asic_data *asic_data;
  12502. int ret = 0;
  12503. /* pre-allocate the asic structure in case we are the first device */
  12504. asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
  12505. if (!asic_data)
  12506. return -ENOMEM;
  12507. spin_lock_irqsave(&hfi1_devs_lock, flags);
  12508. /* Find our peer device */
  12509. list_for_each_entry(tmp, &hfi1_dev_list, list) {
  12510. if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
  12511. dd->unit != tmp->unit) {
  12512. peer = tmp;
  12513. break;
  12514. }
  12515. }
  12516. if (peer) {
  12517. /* use already allocated structure */
  12518. dd->asic_data = peer->asic_data;
  12519. kfree(asic_data);
  12520. } else {
  12521. dd->asic_data = asic_data;
  12522. mutex_init(&dd->asic_data->asic_resource_mutex);
  12523. }
  12524. dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
  12525. spin_unlock_irqrestore(&hfi1_devs_lock, flags);
  12526. /* first one through - set up i2c devices */
  12527. if (!peer)
  12528. ret = set_up_i2c(dd, dd->asic_data);
  12529. return ret;
  12530. }
  12531. /*
  12532. * Set dd->boardname. Use a generic name if a name is not returned from
  12533. * EFI variable space.
  12534. *
  12535. * Return 0 on success, -ENOMEM if space could not be allocated.
  12536. */
  12537. static int obtain_boardname(struct hfi1_devdata *dd)
  12538. {
  12539. /* generic board description */
  12540. const char generic[] =
  12541. "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
  12542. unsigned long size;
  12543. int ret;
  12544. ret = read_hfi1_efi_var(dd, "description", &size,
  12545. (void **)&dd->boardname);
  12546. if (ret) {
  12547. dd_dev_info(dd, "Board description not found\n");
  12548. /* use generic description */
  12549. dd->boardname = kstrdup(generic, GFP_KERNEL);
  12550. if (!dd->boardname)
  12551. return -ENOMEM;
  12552. }
  12553. return 0;
  12554. }
  12555. /*
  12556. * Check the interrupt registers to make sure that they are mapped correctly.
12557. * It is intended to help the user identify any mismapping by the VMM when
12558. * the driver is running in a VM. This function should only be called before
12559. * the interrupts are set up properly.
  12560. *
  12561. * Return 0 on success, -EINVAL on failure.
  12562. */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
/**
 * Allocate and initialize the device structure for the hfi.
 * @pdev: the pci_dev for the hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;

	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		ppd->actual_vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable defaults; they will
		 * be set for real when the link is up.
		 */
		ppd->lstate = IB_PORT_DOWN;
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
		ppd->last_pstate = 0xff; /* invalid value */
	}
	dd->link_default = HLS_DN_POLL;

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component.  In this case, it is likely that the
	 * driver is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}
	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds.  If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;
	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip sizes, reset chip CSRs */
	init_chip(dd);

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()          - the chip will not initiate any PCIe
	 *                          transactions
	 * - pcie_speeds()        - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *                          downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point.  Also, all
	 * contexts are using the same value, as per the module parameter.
	 */
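	/* rhf_offset is in dwords: the entry size minus the 2-dword RHF */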
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		     ((dd->base_guid >> 11) & 0xF000000));
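	/* the OUI is the upper three bytes of the base GUID */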
	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
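/*
 * Compute the number of extra egress cycles needed to throttle a packet
 * of dw_len dwords down to the desired egress rate.  Returns 0 when no
 * throttling is needed (rate unset, or not slower than the current rate).
 */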
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
			egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
/**
 * create_pbc - build a pbc for transmission
 * @ppd: the port device
 * @flags: special case flags or-ed in built pbc
 * @srate_mbs: static rate in Mb/s
 * @vl: vl
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);
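	/* assemble the PBC fields: static rate delay, no HCRC, VL, length */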
	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
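/* SBus receiver address of the thermal sensor and its monitor-mode value */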
#define SBUS_THERMAL    0x4f
#define SBUS_THERM_MONITOR_MODE 0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))
/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface.  In order for this to work, the SBus Master
 * firmware has to be loaded because the HW polling logic uses
 * SBus interrupts, which are not supported with the default
 * firmware.  Otherwise, no data will be returned through the
 * ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;
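
	/* nothing to do if not on real silicon or if already initialized */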
	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);
	/* Thermal Sensor Initialization */
	/* Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/* Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/* Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/* Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/* Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
	/* Set initialized flag */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE.  This will cause the
	 * 8051 to put the Serdes in reset.  However, we don't want to
	 * go through the entire link state machine since we want to
	 * shutdown ASAP.  Furthermore, this is not a graceful shutdown
	 * but rather an attempt to save the chip.
	 * Code below is almost the same as quiet_serdes() but avoids
	 * all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);
	/*
	 * Step 2: Shutdown LCB and 8051
	 *         After shutdown, do not restore DC_CFG_RESET value.
	 */
	dc_shutdown(dd);
}