chip.c 444 KB

[source not shown — only the viewer's line-number gutter (lines 1–14308) was captured]
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
50201502115022150231502415025150261502715028150291503015031150321503315034150351503615037150381503915040150411504215043150441504515046150471504815049150501505115052150531505415055150561505715058150591506015061150621506315064150651506615067150681506915070150711507215073150741507515076150771507815079150801508115082150831508415085150861508715088150891509015091150921509315094150951509615097150981509915100151011510215103151041510515106151071510815109151101511115112151131511415115151161511715118151191512015121151221512315124151251512615127151281512915130151311513215133151341513515136151371513815139151401514115142151431514415145151461514715148151491515015151151521515315154151551515615157151581515915160151611516215163151641516515166151671516815169151701517115172151731517415175151761517715178151791518015181151821518315184151851518615187151881518915190151911519215193151941519515196151971519815199152001520115202152031520415205152061520715208152091521015211152121521315214152151521615217152181521915220152211522215223
  1. /*
  2. * Copyright(c) 2015 - 2017 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. /*
  48. * This file contains all of the code that is specific to the HFI chip
  49. */
  50. #include <linux/pci.h>
  51. #include <linux/delay.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/module.h>
  54. #include "hfi.h"
  55. #include "trace.h"
  56. #include "mad.h"
  57. #include "pio.h"
  58. #include "sdma.h"
  59. #include "eprom.h"
  60. #include "efivar.h"
  61. #include "platform.h"
  62. #include "aspm.h"
  63. #include "affinity.h"
  64. #include "debugfs.h"
  65. #define NUM_IB_PORTS 1
  66. uint kdeth_qp;
  67. module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  68. MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  69. uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  70. module_param(num_vls, uint, S_IRUGO);
  71. MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  72. /*
  73. * Default time to aggregate two 10K packets from the idle state
  74. * (timer not running). The timer starts at the end of the first packet,
  75. * so only the time for one 10K packet and header plus a bit extra is needed.
   76. * 10 * 1024 + 64 header bytes = 10304 bytes
   77. * 10304 bytes / 12.5 GB/s = 824.32 ns
  78. */
  79. uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  80. module_param(rcv_intr_timeout, uint, S_IRUGO);
  81. MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
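/*
 * Editor's note: worked arithmetic for the default above.  10 * 1024
 * payload bytes + 64 header bytes = 10304 bytes; at 12.5 GB/s that is
 * 10304 / 12.5 = 824.32 ns, rounded to 824 ns, plus 16 ns of margin for
 * the coalescing interrupt, giving (824 + 16) = 840 ns.
 */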
  82. uint rcv_intr_count = 16; /* same as qib */
  83. module_param(rcv_intr_count, uint, S_IRUGO);
  84. MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  85. ushort link_crc_mask = SUPPORTED_CRCS;
  86. module_param(link_crc_mask, ushort, S_IRUGO);
  87. MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  88. uint loopback;
  89. module_param_named(loopback, loopback, uint, S_IRUGO);
   90. MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
  91. /* Other driver tunables */
   92. uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
  93. static ushort crc_14b_sideband = 1;
  94. static uint use_flr = 1;
  95. uint quick_linkup; /* skip LNI */
  96. struct flag_table {
  97. u64 flag; /* the flag */
  98. char *str; /* description string */
  99. u16 extra; /* extra information */
  100. u16 unused0;
  101. u32 unused1;
  102. };
  103. /* str must be a string constant */
  104. #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
  105. #define FLAG_ENTRY0(str, flag) {flag, str, 0}
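/*
 * Editor's illustrative sketch (not part of the original driver): a
 * flag_table entry pairs a status-register bit mask ('flag') with a
 * printable name ('str') and optional consequence bits ('extra').
 * Decoding a status value is then a matter of testing each entry's mask;
 * the helper below is hypothetical and shown only as an example.
 */
static void __maybe_unused example_print_flags(u64 status,
					       const struct flag_table *tbl,
					       size_t len)
{
	size_t i;

	/* print the name of every flag bit that is set in 'status' */
	for (i = 0; i < len; i++)
		if (status & tbl[i].flag)
			pr_info("%s\n", tbl[i].str);
}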
  106. /* Send Error Consequences */
  107. #define SEC_WRITE_DROPPED 0x1
  108. #define SEC_PACKET_DROPPED 0x2
  109. #define SEC_SC_HALTED 0x4 /* per-context only */
  110. #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
  111. #define DEFAULT_KRCVQS 2
  112. #define MIN_KERNEL_KCTXTS 2
  113. #define FIRST_KERNEL_KCTXT 1
  114. /*
  115. * RSM instance allocation
  116. * 0 - Verbs
  117. * 1 - User Fecn Handling
  118. * 2 - Vnic
  119. */
  120. #define RSM_INS_VERBS 0
  121. #define RSM_INS_FECN 1
  122. #define RSM_INS_VNIC 2
  123. /* Bit offset into the GUID which carries HFI id information */
  124. #define GUID_HFI_INDEX_SHIFT 39
  125. /* extract the emulation revision */
  126. #define emulator_rev(dd) ((dd)->irev >> 8)
  127. /* parallel and serial emulation versions are 3 and 4 respectively */
  128. #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
  129. #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
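/*
 * Editor's worked example for the macros above (the irev value is made up
 * for illustration): with dd->irev == 0x0503, emulator_rev(dd) is 0x05 and
 * the low nibble 0x3 makes is_emulator_p(dd) true (parallel emulation); a
 * low nibble of 0x4 would make is_emulator_s(dd) true instead.
 */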
  130. /* RSM fields for Verbs */
  131. /* packet type */
  132. #define IB_PACKET_TYPE 2ull
  133. #define QW_SHIFT 6ull
  134. /* QPN[7..1] */
  135. #define QPN_WIDTH 7ull
  136. /* LRH.BTH: QW 0, OFFSET 48 - for match */
  137. #define LRH_BTH_QW 0ull
  138. #define LRH_BTH_BIT_OFFSET 48ull
  139. #define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
  140. #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
  141. #define LRH_BTH_SELECT
  142. #define LRH_BTH_MASK 3ull
  143. #define LRH_BTH_VALUE 2ull
  144. /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
  145. #define LRH_SC_QW 0ull
  146. #define LRH_SC_BIT_OFFSET 56ull
  147. #define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
  148. #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
  149. #define LRH_SC_MASK 128ull
  150. #define LRH_SC_VALUE 0ull
  151. /* SC[n..0] QW 0, OFFSET 60 - for select */
  152. #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
  153. /* QPN[m+n:1] QW 1, OFFSET 1 */
  154. #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
  155. /* RSM fields for Vnic */
  156. /* L2_TYPE: QW 0, OFFSET 61 - for match */
  157. #define L2_TYPE_QW 0ull
  158. #define L2_TYPE_BIT_OFFSET 61ull
  159. #define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
  160. #define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
  161. #define L2_TYPE_MASK 3ull
  162. #define L2_16B_VALUE 2ull
  163. /* L4_TYPE QW 1, OFFSET 0 - for match */
  164. #define L4_TYPE_QW 1ull
  165. #define L4_TYPE_BIT_OFFSET 0ull
  166. #define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
  167. #define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
  168. #define L4_16B_TYPE_MASK 0xFFull
  169. #define L4_16B_ETH_VALUE 0x78ull
  170. /* 16B VESWID - for select */
  171. #define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
  172. /* 16B ENTROPY - for select */
  173. #define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
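/*
 * Editor's note on the RSM offset encoding above: each match/select offset
 * is the quad-word index shifted left by QW_SHIFT (6) OR'd with the bit
 * offset within that quad word, so for example:
 *	LRH_BTH_MATCH_OFFSET     = (0 << 6) | 48 = 48
 *	LRH_SC_MATCH_OFFSET      = (0 << 6) | 56 = 56
 *	QPN_SELECT_OFFSET        = (1 << 6) |  1 = 65
 *	L4_TYPE_MATCH_OFFSET     = (1 << 6) |  0 = 64
 *	L2_16B_ENTROPY_OFFSET    = (1 << 6) | 32 = 96
 *	L4_16B_HDR_VESWID_OFFSET = (2 << 6) | 16 = 144
 */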
  174. /* defines to build power on SC2VL table */
  175. #define SC2VL_VAL( \
  176. num, \
  177. sc0, sc0val, \
  178. sc1, sc1val, \
  179. sc2, sc2val, \
  180. sc3, sc3val, \
  181. sc4, sc4val, \
  182. sc5, sc5val, \
  183. sc6, sc6val, \
  184. sc7, sc7val) \
  185. ( \
  186. ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
  187. ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
  188. ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
  189. ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
  190. ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
  191. ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
  192. ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
  193. ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
  194. )
  195. #define DC_SC_VL_VAL( \
  196. range, \
  197. e0, e0val, \
  198. e1, e1val, \
  199. e2, e2val, \
  200. e3, e3val, \
  201. e4, e4val, \
  202. e5, e5val, \
  203. e6, e6val, \
  204. e7, e7val, \
  205. e8, e8val, \
  206. e9, e9val, \
  207. e10, e10val, \
  208. e11, e11val, \
  209. e12, e12val, \
  210. e13, e13val, \
  211. e14, e14val, \
  212. e15, e15val) \
  213. ( \
  214. ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
  215. ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
  216. ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
  217. ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
  218. ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
  219. ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
  220. ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
  221. ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
  222. ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
  223. ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
  224. ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
  225. ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
  226. ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
  227. ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
  228. ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
  229. ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
  230. )
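/*
 * Editor's illustrative use of SC2VL_VAL (a sketch, not the values this
 * driver actually programs): each invocation packs one VL number into each
 * SC field of a single 64-bit SendSC2VLt CSR value, e.g.
 *
 *	write_csr(dd, SEND_SC2VLT0,
 *		  SC2VL_VAL(0,
 *			    0, 0, 1, 0, 2, 0, 3, 0,
 *			    4, 1, 5, 1, 6, 1, 7, 1));
 *
 * would map SC0-SC3 to VL0 and SC4-SC7 to VL1 in table 0.  DC_SC_VL_VAL
 * builds the DCC side SC-to-VL table entries the same way.
 */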
  231. /* all CceStatus sub-block freeze bits */
  232. #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
  233. | CCE_STATUS_RXE_FROZE_SMASK \
  234. | CCE_STATUS_TXE_FROZE_SMASK \
  235. | CCE_STATUS_TXE_PIO_FROZE_SMASK)
  236. /* all CceStatus sub-block TXE pause bits */
  237. #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
  238. | CCE_STATUS_TXE_PAUSED_SMASK \
  239. | CCE_STATUS_SDMA_PAUSED_SMASK)
  240. /* all CceStatus sub-block RXE pause bits */
  241. #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
  242. #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
  243. #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
  244. /*
  245. * CCE Error flags.
  246. */
  247. static struct flag_table cce_err_status_flags[] = {
  248. /* 0*/ FLAG_ENTRY0("CceCsrParityErr",
  249. CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
  250. /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
  251. CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
  252. /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
  253. CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
  254. /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
  255. CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
  256. /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
  257. CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
  258. /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
  259. CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
  260. /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
  261. CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
  262. /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
  263. CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
  264. /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
  265. CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
  266. /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
  267. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
   268. /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
  269. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
  270. /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
  271. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
  272. /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
  273. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
  274. /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
  275. CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
   276. /*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
  277. CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
  278. /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
  279. CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
   280. /*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
  281. CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
   282. /*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
  283. CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
  284. /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
  285. CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
  286. /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
  287. CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
  288. /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
  289. CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
  290. /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
  291. CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
  292. /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
  293. CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
  294. /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
  295. CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
  296. /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
  297. CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
  298. /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
  299. CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
  300. /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
  301. CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
  302. /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
  303. CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
  304. /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
  305. CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
  306. /*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
  307. CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
  308. /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
  309. CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
  310. /*31*/ FLAG_ENTRY0("LATriggered",
  311. CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
  312. /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
  313. CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
  314. /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
  315. CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
  316. /*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
  317. CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
  318. /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
  319. CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
  320. /*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
  321. CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
  322. /*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
  323. CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
  324. /*38*/ FLAG_ENTRY0("CceIntMapCorErr",
  325. CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
  326. /*39*/ FLAG_ENTRY0("CceIntMapUncErr",
  327. CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
  328. /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
  329. CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
  330. /*41-63 reserved*/
  331. };
  332. /*
  333. * Misc Error flags
  334. */
  335. #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
  336. static struct flag_table misc_err_status_flags[] = {
  337. /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
  338. /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
  339. /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
  340. /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
  341. /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
  342. /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
  343. /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
  344. /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
  345. /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
  346. /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
  347. /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
  348. /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
  349. /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
  350. };
  351. /*
  352. * TXE PIO Error flags and consequences
  353. */
  354. static struct flag_table pio_err_status_flags[] = {
  355. /* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
  356. SEC_WRITE_DROPPED,
  357. SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
  358. /* 1*/ FLAG_ENTRY("PioWriteAddrParity",
  359. SEC_SPC_FREEZE,
  360. SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
  361. /* 2*/ FLAG_ENTRY("PioCsrParity",
  362. SEC_SPC_FREEZE,
  363. SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
  364. /* 3*/ FLAG_ENTRY("PioSbMemFifo0",
  365. SEC_SPC_FREEZE,
  366. SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
  367. /* 4*/ FLAG_ENTRY("PioSbMemFifo1",
  368. SEC_SPC_FREEZE,
  369. SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
  370. /* 5*/ FLAG_ENTRY("PioPccFifoParity",
  371. SEC_SPC_FREEZE,
  372. SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
  373. /* 6*/ FLAG_ENTRY("PioPecFifoParity",
  374. SEC_SPC_FREEZE,
  375. SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
  376. /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
  377. SEC_SPC_FREEZE,
  378. SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
  379. /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
  380. SEC_SPC_FREEZE,
  381. SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
  382. /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
  383. SEC_SPC_FREEZE,
  384. SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
  385. /*10*/ FLAG_ENTRY("PioSmPktResetParity",
  386. SEC_SPC_FREEZE,
  387. SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
  388. /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
  389. SEC_SPC_FREEZE,
  390. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
  391. /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
  392. SEC_SPC_FREEZE,
  393. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
  394. /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
  395. 0,
  396. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
  397. /*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
  398. 0,
  399. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
  400. /*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
  401. SEC_SPC_FREEZE,
  402. SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
  403. /*16*/ FLAG_ENTRY("PioPpmcPblFifo",
  404. SEC_SPC_FREEZE,
  405. SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
  406. /*17*/ FLAG_ENTRY("PioInitSmIn",
  407. 0,
  408. SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
  409. /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
  410. SEC_SPC_FREEZE,
  411. SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
  412. /*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
  413. SEC_SPC_FREEZE,
  414. SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
  415. /*20*/ FLAG_ENTRY("PioHostAddrMemCor",
  416. 0,
  417. SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
  418. /*21*/ FLAG_ENTRY("PioWriteDataParity",
  419. SEC_SPC_FREEZE,
  420. SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
  421. /*22*/ FLAG_ENTRY("PioStateMachine",
  422. SEC_SPC_FREEZE,
  423. SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
  424. /*23*/ FLAG_ENTRY("PioWriteQwValidParity",
  425. SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
  426. SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
  427. /*24*/ FLAG_ENTRY("PioBlockQwCountParity",
  428. SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
  429. SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
  430. /*25*/ FLAG_ENTRY("PioVlfVlLenParity",
  431. SEC_SPC_FREEZE,
  432. SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
  433. /*26*/ FLAG_ENTRY("PioVlfSopParity",
  434. SEC_SPC_FREEZE,
  435. SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
  436. /*27*/ FLAG_ENTRY("PioVlFifoParity",
  437. SEC_SPC_FREEZE,
  438. SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
  439. /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
  440. SEC_SPC_FREEZE,
  441. SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
  442. /*29*/ FLAG_ENTRY("PioPpmcSopLen",
  443. SEC_SPC_FREEZE,
  444. SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
  445. /*30-31 reserved*/
  446. /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
  447. SEC_SPC_FREEZE,
  448. SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
  449. /*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
  450. SEC_SPC_FREEZE,
  451. SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
  452. /*34*/ FLAG_ENTRY("PioPccSopHeadParity",
  453. SEC_SPC_FREEZE,
  454. SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
  455. /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
  456. SEC_SPC_FREEZE,
  457. SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
  458. /*36-63 reserved*/
  459. };
  460. /* TXE PIO errors that cause an SPC freeze */
  461. #define ALL_PIO_FREEZE_ERR \
  462. (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
  463. | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
  464. | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
  465. | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
  466. | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
  467. | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
  468. | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
  469. | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
  470. | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
  471. | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
  472. | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
  473. | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
  474. | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
  475. | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
  476. | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
  477. | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
  478. | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
  479. | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
  480. | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
  481. | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
  482. | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
  483. | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
  484. | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
  485. | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
  486. | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
  487. | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
  488. | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
  489. | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
  490. | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
  491. /*
  492. * TXE SDMA Error flags
  493. */
  494. static struct flag_table sdma_err_status_flags[] = {
  495. /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
  496. SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
  497. /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
  498. SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
  499. /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
  500. SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
  501. /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
  502. SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
  503. /*04-63 reserved*/
  504. };
  505. /* TXE SDMA errors that cause an SPC freeze */
  506. #define ALL_SDMA_FREEZE_ERR \
  507. (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
  508. | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
  509. | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
  510. /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
  511. #define PORT_DISCARD_EGRESS_ERRS \
  512. (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
  513. | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
  514. | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
  515. /*
  516. * TXE Egress Error flags
  517. */
  518. #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
  519. static struct flag_table egress_err_status_flags[] = {
  520. /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
  521. /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
  522. /* 2 reserved */
  523. /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
  524. SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
  525. /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
  526. /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
  527. /* 6 reserved */
  528. /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
  529. SEES(TX_PIO_LAUNCH_INTF_PARITY)),
  530. /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
  531. SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
  532. /* 9-10 reserved */
  533. /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
  534. SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
  535. /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
  536. /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
  537. /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
  538. /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
  539. /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
  540. SEES(TX_SDMA0_DISALLOWED_PACKET)),
  541. /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
  542. SEES(TX_SDMA1_DISALLOWED_PACKET)),
  543. /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
  544. SEES(TX_SDMA2_DISALLOWED_PACKET)),
  545. /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
  546. SEES(TX_SDMA3_DISALLOWED_PACKET)),
  547. /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
  548. SEES(TX_SDMA4_DISALLOWED_PACKET)),
  549. /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
  550. SEES(TX_SDMA5_DISALLOWED_PACKET)),
  551. /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
  552. SEES(TX_SDMA6_DISALLOWED_PACKET)),
  553. /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
  554. SEES(TX_SDMA7_DISALLOWED_PACKET)),
  555. /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
  556. SEES(TX_SDMA8_DISALLOWED_PACKET)),
  557. /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
  558. SEES(TX_SDMA9_DISALLOWED_PACKET)),
  559. /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
  560. SEES(TX_SDMA10_DISALLOWED_PACKET)),
  561. /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
  562. SEES(TX_SDMA11_DISALLOWED_PACKET)),
  563. /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
  564. SEES(TX_SDMA12_DISALLOWED_PACKET)),
  565. /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
  566. SEES(TX_SDMA13_DISALLOWED_PACKET)),
  567. /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
  568. SEES(TX_SDMA14_DISALLOWED_PACKET)),
  569. /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
  570. SEES(TX_SDMA15_DISALLOWED_PACKET)),
  571. /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
  572. SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
  573. /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
  574. SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
  575. /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
  576. SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
  577. /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
  578. SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
  579. /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
  580. SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
  581. /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
  582. SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
  583. /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
  584. SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
  585. /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
  586. SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
  587. /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
  588. SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
  589. /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
  590. /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
  591. /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
  592. /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
  593. /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
  594. /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
  595. /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
  596. /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
  597. /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
  598. /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
  599. /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
  600. /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
  601. /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
  602. /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
  603. /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
  604. /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
  605. /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
  606. /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
  607. /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
  608. /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
  609. /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
  610. /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
  611. SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
  612. /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
  613. SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
  614. };
  615. /*
  616. * TXE Egress Error Info flags
  617. */
  618. #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
  619. static struct flag_table egress_err_info_flags[] = {
  620. /* 0*/ FLAG_ENTRY0("Reserved", 0ull),
  621. /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
  622. /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
  623. /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
  624. /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
  625. /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
  626. /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
  627. /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
  628. /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
  629. /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
  630. /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
  631. /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
  632. /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
  633. /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
  634. /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
  635. /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
  636. /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
  637. /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
  638. /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
  639. /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
  640. /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
  641. /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
  642. };
  643. /* TXE Egress errors that cause an SPC freeze */
  644. #define ALL_TXE_EGRESS_FREEZE_ERR \
  645. (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
  646. | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
  647. | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
  648. | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
  649. | SEES(TX_LAUNCH_CSR_PARITY) \
  650. | SEES(TX_SBRD_CTL_CSR_PARITY) \
  651. | SEES(TX_CONFIG_PARITY) \
  652. | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
  653. | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
  654. | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
  655. | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
  656. | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
  657. | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
  658. | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
  659. | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
  660. | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
  661. | SEES(TX_CREDIT_RETURN_PARITY))
  662. /*
  663. * TXE Send error flags
  664. */
  665. #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
  666. static struct flag_table send_err_status_flags[] = {
  667. /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
  668. /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
  669. /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
  670. };
  671. /*
  672. * TXE Send Context Error flags and consequences
  673. */
  674. static struct flag_table sc_err_status_flags[] = {
  675. /* 0*/ FLAG_ENTRY("InconsistentSop",
  676. SEC_PACKET_DROPPED | SEC_SC_HALTED,
  677. SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
  678. /* 1*/ FLAG_ENTRY("DisallowedPacket",
  679. SEC_PACKET_DROPPED | SEC_SC_HALTED,
  680. SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
  681. /* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
  682. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  683. SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
  684. /* 3*/ FLAG_ENTRY("WriteOverflow",
  685. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  686. SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
  687. /* 4*/ FLAG_ENTRY("WriteOutOfBounds",
  688. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  689. SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
  690. /* 5-63 reserved*/
  691. };
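/*
 * Editor's sketch of how the 'extra' consequence bits above could be
 * aggregated for a send context error status.  This is a hypothetical
 * helper shown for illustration only, not the driver's actual error
 * handler.
 */
static u16 __maybe_unused example_sc_err_consequences(u64 status)
{
	u16 consequences = 0;
	size_t i;

	/* OR together the SEC_* bits of every error flag set in 'status' */
	for (i = 0; i < ARRAY_SIZE(sc_err_status_flags); i++)
		if (status & sc_err_status_flags[i].flag)
			consequences |= sc_err_status_flags[i].extra;

	return consequences; /* e.g. SEC_PACKET_DROPPED | SEC_SC_HALTED */
}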
  692. /*
  693. * RXE Receive Error flags
  694. */
  695. #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
  696. static struct flag_table rxe_err_status_flags[] = {
  697. /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
  698. /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
  699. /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
  700. /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
  701. /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
  702. /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
  703. /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
  704. /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
  705. /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
  706. /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
  707. /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
  708. /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
  709. /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
  710. /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
  711. /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
  712. /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
  713. /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
  714. RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
  715. /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
  716. /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
  717. /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
  718. RXES(RBUF_BLOCK_LIST_READ_UNC)),
  719. /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
  720. RXES(RBUF_BLOCK_LIST_READ_COR)),
  721. /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
  722. RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
  723. /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
  724. RXES(RBUF_CSR_QENT_CNT_PARITY)),
  725. /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
  726. RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
  727. /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
  728. RXES(RBUF_CSR_QVLD_BIT_PARITY)),
  729. /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
  730. /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
  731. /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
  732. RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
  733. /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
  734. /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
  735. /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
  736. /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
  737. /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
  738. /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
  739. /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
  740. /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
  741. RXES(RBUF_FL_INITDONE_PARITY)),
  742. /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
  743. RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
  744. /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
  745. /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
  746. /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
  747. /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
  748. RXES(LOOKUP_DES_PART1_UNC_COR)),
  749. /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
  750. RXES(LOOKUP_DES_PART2_PARITY)),
  751. /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
  752. /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
  753. /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
  754. /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
  755. /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
  756. /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
  757. /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
  758. /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
  759. /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
  760. /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
  761. /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
  762. /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
  763. /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
  764. /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
  765. /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
  766. /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
  767. /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
  768. /*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
  769. /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
  770. /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
  771. /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
  772. /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
  773. };
  774. /* RXE errors that will trigger an SPC freeze */
  775. #define ALL_RXE_FREEZE_ERR \
  776. (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
  777. | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
  778. | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
  779. | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
  780. | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
  781. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
  782. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
  783. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
  784. | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
  785. | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
  786. | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
  787. | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
  788. | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
  789. | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
  790. | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
  791. | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
  792. | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
  793. | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
  794. | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
  795. | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
  796. | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
  797. | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
  798. | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
  799. | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
  800. | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
  801. | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
  802. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
  803. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
  804. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
  805. | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
  806. | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
  807. | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
  808. | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
  809. | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
  810. | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
  811. | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
  812. | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
  813. | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
  814. | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
  815. | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
  816. | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
  817. | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
  818. | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
  819. | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
  820. #define RXE_FREEZE_ABORT_MASK \
  821. (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
  822. RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
  823. RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
  824. /*
  825. * DCC Error Flags
  826. */
  827. #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
  828. static struct flag_table dcc_err_flags[] = {
  829. FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
  830. FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
  831. FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
  832. FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
  833. FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
  834. FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
  835. FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
  836. FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
  837. FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
  838. FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
  839. FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
  840. FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
  841. FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
  842. FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
  843. FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
  844. FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
  845. FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
  846. FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
  847. FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
  848. FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
  849. FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
  850. FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
  851. FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
  852. FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
  853. FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
  854. FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
  855. FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
  856. FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
  857. FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
  858. FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
  859. FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
  860. FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
  861. FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
  862. FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
  863. FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
  864. FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
  865. FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
  866. FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
  867. FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
  868. FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
  869. FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
  870. FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
  871. FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
  872. FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
  873. FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
  874. FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
  875. };
  876. /*
  877. * LCB error flags
  878. */
  879. #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
  880. static struct flag_table lcb_err_flags[] = {
  881. /* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
  882. /* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
  883. /* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
  884. /* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
  885. LCBE(ALL_LNS_FAILED_REINIT_TEST)),
  886. /* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
  887. /* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
  888. /* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
  889. /* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
  890. /* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
  891. /* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
  892. /*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
  893. /*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
  894. /*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
  895. /*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
  896. LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
  897. /*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
  898. /*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
  899. /*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
  900. /*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
  901. /*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
  902. /*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
  903. LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
  904. /*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
  905. /*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
  906. /*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
  907. /*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
  908. /*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
  909. /*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
  910. /*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
  911. LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
  912. /*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
  913. /*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
  914. LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
  915. /*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
  916. LCBE(REDUNDANT_FLIT_PARITY_ERR))
  917. };
  918. /*
  919. * DC8051 Error Flags
  920. */
  921. #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
  922. static struct flag_table dc8051_err_flags[] = {
  923. FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
  924. FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
  925. FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
  926. FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
  927. FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
  928. FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
  929. FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
  930. FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
  931. FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
  932. D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
  933. FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
  934. };
  935. /*
  936. * DC8051 Information Error flags
  937. *
  938. * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
  939. */
  940. static struct flag_table dc8051_info_err_flags[] = {
  941. FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
  942. FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
  943. FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
  944. FLAG_ENTRY0("Serdes internal loopback failure",
  945. FAILED_SERDES_INTERNAL_LOOPBACK),
  946. FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
  947. FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
  948. FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
  949. FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
  950. FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
  951. FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
  952. FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
  953. FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
  954. FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
  955. FLAG_ENTRY0("External Device Request Timeout",
  956. EXTERNAL_DEVICE_REQ_TIMEOUT),
  957. };
  958. /*
959. * DC8051 Information Host Message flags
  960. *
  961. * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
  962. */
  963. static struct flag_table dc8051_info_host_msg_flags[] = {
  964. FLAG_ENTRY0("Host request done", 0x0001),
  965. FLAG_ENTRY0("BC SMA message", 0x0002),
  966. FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
  967. FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
  968. FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
  969. FLAG_ENTRY0("External device config request", 0x0020),
  970. FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
  971. FLAG_ENTRY0("LinkUp achieved", 0x0080),
  972. FLAG_ENTRY0("Link going down", 0x0100),
  973. };
  974. static u32 encoded_size(u32 size);
  975. static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
  976. static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
  977. static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
  978. u8 *continuous);
  979. static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
  980. u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
  981. static void read_vc_remote_link_width(struct hfi1_devdata *dd,
  982. u8 *remote_tx_rate, u16 *link_widths);
  983. static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
  984. u8 *flag_bits, u16 *link_widths);
  985. static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
  986. u8 *device_rev);
  987. static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
  988. static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
  989. static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
  990. u8 *tx_polarity_inversion,
  991. u8 *rx_polarity_inversion, u8 *max_rate);
  992. static void handle_sdma_eng_err(struct hfi1_devdata *dd,
  993. unsigned int context, u64 err_status);
  994. static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
  995. static void handle_dcc_err(struct hfi1_devdata *dd,
  996. unsigned int context, u64 err_status);
  997. static void handle_lcb_err(struct hfi1_devdata *dd,
  998. unsigned int context, u64 err_status);
  999. static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1000. static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1001. static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1002. static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1003. static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1004. static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1005. static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1006. static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
  1007. static void set_partition_keys(struct hfi1_pportdata *ppd);
  1008. static const char *link_state_name(u32 state);
  1009. static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
  1010. u32 state);
  1011. static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
  1012. u64 *out_data);
  1013. static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
  1014. static int thermal_init(struct hfi1_devdata *dd);
  1015. static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
  1016. int msecs);
  1017. static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
  1018. static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
  1019. static void handle_temp_err(struct hfi1_devdata *dd);
  1020. static void dc_shutdown(struct hfi1_devdata *dd);
  1021. static void dc_start(struct hfi1_devdata *dd);
  1022. static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
  1023. unsigned int *np);
  1024. static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
  1025. static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
  1026. static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
  1027. /*
  1028. * Error interrupt table entry. This is used as input to the interrupt
  1029. * "clear down" routine used for all second tier error interrupt register.
  1030. * Second tier interrupt registers have a single bit representing them
  1031. * in the top-level CceIntStatus.
  1032. */
  1033. struct err_reg_info {
  1034. u32 status; /* status CSR offset */
  1035. u32 clear; /* clear CSR offset */
  1036. u32 mask; /* mask CSR offset */
  1037. void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
  1038. const char *desc;
  1039. };
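/*
 * Illustrative sketch only, assuming the usual write-1-to-clear semantics:
 * how an err_reg_info entry could be consumed by a generic clear-down pass.
 * The driver's real clear-down routine (referenced in the comment above)
 * may differ; sketch_clear_down() is hypothetical and uses only the
 * read_csr()/write_csr() helpers defined later in this file.
 */
static void sketch_clear_down(struct hfi1_devdata *dd, u32 source,
			      const struct err_reg_info *eri)
{
	u64 reg = read_csr(dd, eri->status);

	if (!reg)
		return;			/* nothing pending */
	if (eri->handler)
		eri->handler(dd, source, reg);
	write_csr(dd, eri->clear, reg);	/* acknowledge the handled bits */
}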
  1040. #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
  1041. #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
  1042. #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
  1043. /*
  1044. * Helpers for building HFI and DC error interrupt table entries. Different
  1045. * helpers are needed because of inconsistent register names.
  1046. */
  1047. #define EE(reg, handler, desc) \
  1048. { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
  1049. handler, desc }
  1050. #define DC_EE1(reg, handler, desc) \
  1051. { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
  1052. #define DC_EE2(reg, handler, desc) \
  1053. { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
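/*
 * For reference: EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * while DC_EE1()/DC_EE2() pick up the DC-style _FLG, _FLG_CLR/_CLR and
 * _FLG_EN/_EN register name suffixes instead.
 */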
  1054. /*
  1055. * Table of the "misc" grouping of error interrupts. Each entry refers to
  1056. * another register containing more information.
  1057. */
  1058. static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
  1059. /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
  1060. /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
  1061. /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
  1062. /* 3*/ { 0, 0, 0, NULL }, /* reserved */
  1063. /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
  1064. /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
  1065. /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
  1066. /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
  1067. /* the rest are reserved */
  1068. };
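/*
 * Illustrative sketch only (hypothetical helper): mapping a top-level
 * interrupt source number in the general-error range onto its misc_errs[]
 * entry, using the same IS_GENERAL_ERR_START/_END bounds that size the
 * table above.  The driver's actual dispatch logic may differ.
 */
static const struct err_reg_info *sketch_misc_err_entry(unsigned int source)
{
	if (source < IS_GENERAL_ERR_START || source >= IS_GENERAL_ERR_END)
		return NULL;
	return &misc_errs[source - IS_GENERAL_ERR_START];
}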
  1069. /*
  1070. * Index into the Various section of the interrupt sources
  1071. * corresponding to the Critical Temperature interrupt.
  1072. */
  1073. #define TCRIT_INT_SOURCE 4
  1074. /*
  1075. * SDMA error interrupt entry - refers to another register containing more
  1076. * information.
  1077. */
  1078. static const struct err_reg_info sdma_eng_err =
  1079. EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
  1080. static const struct err_reg_info various_err[NUM_VARIOUS] = {
  1081. /* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
  1082. /* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
  1083. /* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
  1084. /* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
  1085. /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
  1086. /* rest are reserved */
  1087. };
  1088. /*
  1089. * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1090. * register cannot be derived from the MTU value because 10K is not
  1091. * a power of 2. Therefore, we need a constant. Everything else can
  1092. * be calculated.
  1093. */
  1094. #define DCC_CFG_PORT_MTU_CAP_10240 7
  1095. /*
  1096. * Table of the DC grouping of error interrupts. Each entry refers to
  1097. * another register containing more information.
  1098. */
  1099. static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
  1100. /* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
  1101. /* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
  1102. /* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
  1103. /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
  1104. /* the rest are reserved */
  1105. };
  1106. struct cntr_entry {
  1107. /*
  1108. * counter name
  1109. */
  1110. char *name;
  1111. /*
  1112. * csr to read for name (if applicable)
  1113. */
  1114. u64 csr;
  1115. /*
  1116. * offset into dd or ppd to store the counter's value
  1117. */
  1118. int offset;
  1119. /*
  1120. * flags
  1121. */
  1122. u8 flags;
  1123. /*
  1124. * accessor for stat element, context either dd or ppd
  1125. */
  1126. u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
  1127. int mode, u64 data);
  1128. };
  1129. #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
  1130. #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
  1131. #define CNTR_ELEM(name, csr, offset, flags, accessor) \
  1132. { \
  1133. name, \
  1134. csr, \
  1135. offset, \
  1136. flags, \
  1137. accessor \
  1138. }
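/*
 * Illustrative sketch only (hypothetical helper): reading one counter
 * through its accessor.  The context pointer is either the dd or the ppd,
 * whichever the entry's rw_cntr expects, and CNTR_MODE_R/CNTR_MODE_W select
 * the operation exactly as in read_write_csr() below; data is ignored on
 * reads.
 */
static u64 sketch_read_cntr(const struct cntr_entry *entry, void *context,
			    int vl)
{
	return entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
}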
  1139. /* 32bit RXE */
  1140. #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
  1141. CNTR_ELEM(#name, \
  1142. (counter * 8 + RCV_COUNTER_ARRAY32), \
  1143. 0, flags | CNTR_32BIT, \
  1144. port_access_u32_csr)
  1145. #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
  1146. CNTR_ELEM(#name, \
  1147. (counter * 8 + RCV_COUNTER_ARRAY32), \
  1148. 0, flags | CNTR_32BIT, \
  1149. dev_access_u32_csr)
  1150. /* 64bit RXE */
  1151. #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
  1152. CNTR_ELEM(#name, \
  1153. (counter * 8 + RCV_COUNTER_ARRAY64), \
  1154. 0, flags, \
  1155. port_access_u64_csr)
  1156. #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
  1157. CNTR_ELEM(#name, \
  1158. (counter * 8 + RCV_COUNTER_ARRAY64), \
  1159. 0, flags, \
  1160. dev_access_u64_csr)
  1161. #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
  1162. #define OVR_ELM(ctx) \
  1163. CNTR_ELEM("RcvHdrOvr" #ctx, \
  1164. (RCV_HDR_OVFL_CNT + ctx * 0x100), \
  1165. 0, CNTR_NORMAL, port_access_u64_csr)
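/*
 * For reference: OVR_ELM(0) expands to
 *   CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100, 0,
 *             CNTR_NORMAL, port_access_u64_csr)
 * i.e. the per-receive-context overflow counters are spaced 0x100 bytes
 * apart in CSR space.
 */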
  1166. /* 32bit TXE */
  1167. #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
  1168. CNTR_ELEM(#name, \
  1169. (counter * 8 + SEND_COUNTER_ARRAY32), \
  1170. 0, flags | CNTR_32BIT, \
  1171. port_access_u32_csr)
  1172. /* 64bit TXE */
  1173. #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
  1174. CNTR_ELEM(#name, \
  1175. (counter * 8 + SEND_COUNTER_ARRAY64), \
  1176. 0, flags, \
  1177. port_access_u64_csr)
1178. #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1179. CNTR_ELEM(#name, \
  1180. counter * 8 + SEND_COUNTER_ARRAY64, \
  1181. 0, \
  1182. flags, \
  1183. dev_access_u64_csr)
  1184. /* CCE */
  1185. #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
  1186. CNTR_ELEM(#name, \
  1187. (counter * 8 + CCE_COUNTER_ARRAY32), \
  1188. 0, flags | CNTR_32BIT, \
  1189. dev_access_u32_csr)
  1190. #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
  1191. CNTR_ELEM(#name, \
  1192. (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
  1193. 0, flags | CNTR_32BIT, \
  1194. dev_access_u32_csr)
  1195. /* DC */
  1196. #define DC_PERF_CNTR(name, counter, flags) \
  1197. CNTR_ELEM(#name, \
  1198. counter, \
  1199. 0, \
  1200. flags, \
  1201. dev_access_u64_csr)
  1202. #define DC_PERF_CNTR_LCB(name, counter, flags) \
  1203. CNTR_ELEM(#name, \
  1204. counter, \
  1205. 0, \
  1206. flags, \
  1207. dc_access_lcb_cntr)
  1208. /* ibp counters */
  1209. #define SW_IBP_CNTR(name, cntr) \
  1210. CNTR_ELEM(#name, \
  1211. 0, \
  1212. 0, \
  1213. CNTR_SYNTH, \
  1214. access_ibp_##cntr)
  1215. u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
  1216. {
  1217. if (dd->flags & HFI1_PRESENT) {
  1218. return readq((void __iomem *)dd->kregbase + offset);
  1219. }
  1220. return -1;
  1221. }
  1222. void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
  1223. {
  1224. if (dd->flags & HFI1_PRESENT)
  1225. writeq(value, (void __iomem *)dd->kregbase + offset);
  1226. }
  1227. void __iomem *get_csr_addr(
  1228. struct hfi1_devdata *dd,
  1229. u32 offset)
  1230. {
  1231. return (void __iomem *)dd->kregbase + offset;
  1232. }
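/*
 * Illustrative sketch only (hypothetical helper): the usual read-modify-
 * write pattern built on the read_csr()/write_csr() accessors above.  The
 * offset and bit mask are caller-supplied placeholders, not specific chip
 * registers.
 */
static void sketch_set_csr_bits(struct hfi1_devdata *dd, u32 offset, u64 bits)
{
	u64 reg = read_csr(dd, offset);

	write_csr(dd, offset, reg | bits);
}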
  1233. static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
  1234. int mode, u64 value)
  1235. {
  1236. u64 ret;
  1237. if (mode == CNTR_MODE_R) {
  1238. ret = read_csr(dd, csr);
  1239. } else if (mode == CNTR_MODE_W) {
  1240. write_csr(dd, csr, value);
  1241. ret = value;
  1242. } else {
  1243. dd_dev_err(dd, "Invalid cntr register access mode");
  1244. return 0;
  1245. }
  1246. hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
  1247. return ret;
  1248. }
  1249. /* Dev Access */
  1250. static u64 dev_access_u32_csr(const struct cntr_entry *entry,
  1251. void *context, int vl, int mode, u64 data)
  1252. {
  1253. struct hfi1_devdata *dd = context;
  1254. u64 csr = entry->csr;
  1255. if (entry->flags & CNTR_SDMA) {
  1256. if (vl == CNTR_INVALID_VL)
  1257. return 0;
  1258. csr += 0x100 * vl;
  1259. } else {
  1260. if (vl != CNTR_INVALID_VL)
  1261. return 0;
  1262. }
  1263. return read_write_csr(dd, csr, mode, data);
  1264. }
  1265. static u64 access_sde_err_cnt(const struct cntr_entry *entry,
  1266. void *context, int idx, int mode, u64 data)
  1267. {
  1268. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1269. if (dd->per_sdma && idx < dd->num_sdma)
  1270. return dd->per_sdma[idx].err_cnt;
  1271. return 0;
  1272. }
  1273. static u64 access_sde_int_cnt(const struct cntr_entry *entry,
  1274. void *context, int idx, int mode, u64 data)
  1275. {
  1276. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1277. if (dd->per_sdma && idx < dd->num_sdma)
  1278. return dd->per_sdma[idx].sdma_int_cnt;
  1279. return 0;
  1280. }
  1281. static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
  1282. void *context, int idx, int mode, u64 data)
  1283. {
  1284. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1285. if (dd->per_sdma && idx < dd->num_sdma)
  1286. return dd->per_sdma[idx].idle_int_cnt;
  1287. return 0;
  1288. }
  1289. static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
  1290. void *context, int idx, int mode,
  1291. u64 data)
  1292. {
  1293. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1294. if (dd->per_sdma && idx < dd->num_sdma)
  1295. return dd->per_sdma[idx].progress_int_cnt;
  1296. return 0;
  1297. }
  1298. static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
  1299. int vl, int mode, u64 data)
  1300. {
  1301. struct hfi1_devdata *dd = context;
  1302. u64 val = 0;
  1303. u64 csr = entry->csr;
  1304. if (entry->flags & CNTR_VL) {
  1305. if (vl == CNTR_INVALID_VL)
  1306. return 0;
  1307. csr += 8 * vl;
  1308. } else {
  1309. if (vl != CNTR_INVALID_VL)
  1310. return 0;
  1311. }
  1312. val = read_write_csr(dd, csr, mode, data);
  1313. return val;
  1314. }
  1315. static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
  1316. int vl, int mode, u64 data)
  1317. {
  1318. struct hfi1_devdata *dd = context;
  1319. u32 csr = entry->csr;
  1320. int ret = 0;
  1321. if (vl != CNTR_INVALID_VL)
  1322. return 0;
  1323. if (mode == CNTR_MODE_R)
  1324. ret = read_lcb_csr(dd, csr, &data);
  1325. else if (mode == CNTR_MODE_W)
  1326. ret = write_lcb_csr(dd, csr, data);
  1327. if (ret) {
  1328. dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
  1329. return 0;
  1330. }
  1331. hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
  1332. return data;
  1333. }
  1334. /* Port Access */
  1335. static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
  1336. int vl, int mode, u64 data)
  1337. {
  1338. struct hfi1_pportdata *ppd = context;
  1339. if (vl != CNTR_INVALID_VL)
  1340. return 0;
  1341. return read_write_csr(ppd->dd, entry->csr, mode, data);
  1342. }
  1343. static u64 port_access_u64_csr(const struct cntr_entry *entry,
  1344. void *context, int vl, int mode, u64 data)
  1345. {
  1346. struct hfi1_pportdata *ppd = context;
  1347. u64 val;
  1348. u64 csr = entry->csr;
  1349. if (entry->flags & CNTR_VL) {
  1350. if (vl == CNTR_INVALID_VL)
  1351. return 0;
  1352. csr += 8 * vl;
  1353. } else {
  1354. if (vl != CNTR_INVALID_VL)
  1355. return 0;
  1356. }
  1357. val = read_write_csr(ppd->dd, csr, mode, data);
  1358. return val;
  1359. }
  1360. /* Software defined */
  1361. static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
  1362. u64 data)
  1363. {
  1364. u64 ret;
  1365. if (mode == CNTR_MODE_R) {
  1366. ret = *cntr;
  1367. } else if (mode == CNTR_MODE_W) {
  1368. *cntr = data;
  1369. ret = data;
  1370. } else {
  1371. dd_dev_err(dd, "Invalid cntr sw access mode");
  1372. return 0;
  1373. }
  1374. hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
  1375. return ret;
  1376. }
  1377. static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
  1378. int vl, int mode, u64 data)
  1379. {
  1380. struct hfi1_pportdata *ppd = context;
  1381. if (vl != CNTR_INVALID_VL)
  1382. return 0;
  1383. return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
  1384. }
  1385. static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
  1386. int vl, int mode, u64 data)
  1387. {
  1388. struct hfi1_pportdata *ppd = context;
  1389. if (vl != CNTR_INVALID_VL)
  1390. return 0;
  1391. return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
  1392. }
  1393. static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
  1394. void *context, int vl, int mode,
  1395. u64 data)
  1396. {
  1397. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
  1398. if (vl != CNTR_INVALID_VL)
  1399. return 0;
  1400. return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
  1401. }
  1402. static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
  1403. void *context, int vl, int mode, u64 data)
  1404. {
  1405. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
  1406. u64 zero = 0;
  1407. u64 *counter;
  1408. if (vl == CNTR_INVALID_VL)
  1409. counter = &ppd->port_xmit_discards;
  1410. else if (vl >= 0 && vl < C_VL_COUNT)
  1411. counter = &ppd->port_xmit_discards_vl[vl];
  1412. else
  1413. counter = &zero;
  1414. return read_write_sw(ppd->dd, counter, mode, data);
  1415. }
  1416. static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
  1417. void *context, int vl, int mode,
  1418. u64 data)
  1419. {
  1420. struct hfi1_pportdata *ppd = context;
  1421. if (vl != CNTR_INVALID_VL)
  1422. return 0;
  1423. return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
  1424. mode, data);
  1425. }
  1426. static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
  1427. void *context, int vl, int mode, u64 data)
  1428. {
  1429. struct hfi1_pportdata *ppd = context;
  1430. if (vl != CNTR_INVALID_VL)
  1431. return 0;
  1432. return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
  1433. mode, data);
  1434. }
  1435. u64 get_all_cpu_total(u64 __percpu *cntr)
  1436. {
  1437. int cpu;
  1438. u64 counter = 0;
  1439. for_each_possible_cpu(cpu)
  1440. counter += *per_cpu_ptr(cntr, cpu);
  1441. return counter;
  1442. }
  1443. static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
  1444. u64 __percpu *cntr,
  1445. int vl, int mode, u64 data)
  1446. {
  1447. u64 ret = 0;
  1448. if (vl != CNTR_INVALID_VL)
  1449. return 0;
  1450. if (mode == CNTR_MODE_R) {
  1451. ret = get_all_cpu_total(cntr) - *z_val;
  1452. } else if (mode == CNTR_MODE_W) {
  1453. /* A write can only zero the counter */
  1454. if (data == 0)
  1455. *z_val = get_all_cpu_total(cntr);
  1456. else
  1457. dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
  1458. } else {
  1459. dd_dev_err(dd, "Invalid cntr sw cpu access mode");
  1460. return 0;
  1461. }
  1462. return ret;
  1463. }
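/*
 * Illustrative sketch only (hypothetical helper): zeroing a per-CPU counter
 * through read_write_cpu() above.  Writing 0 snapshots the current total
 * into *z_val, so subsequent reads report the delta from this point; any
 * non-zero write is rejected by read_write_cpu().
 */
static void sketch_zero_cpu_cntr(struct hfi1_devdata *dd, u64 *z_val,
				 u64 __percpu *cntr)
{
	read_write_cpu(dd, z_val, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);
}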
  1464. static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
  1465. void *context, int vl, int mode, u64 data)
  1466. {
  1467. struct hfi1_devdata *dd = context;
  1468. return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
  1469. mode, data);
  1470. }
  1471. static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
  1472. void *context, int vl, int mode, u64 data)
  1473. {
  1474. struct hfi1_devdata *dd = context;
  1475. return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
  1476. mode, data);
  1477. }
  1478. static u64 access_sw_pio_wait(const struct cntr_entry *entry,
  1479. void *context, int vl, int mode, u64 data)
  1480. {
  1481. struct hfi1_devdata *dd = context;
  1482. return dd->verbs_dev.n_piowait;
  1483. }
  1484. static u64 access_sw_pio_drain(const struct cntr_entry *entry,
  1485. void *context, int vl, int mode, u64 data)
  1486. {
  1487. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1488. return dd->verbs_dev.n_piodrain;
  1489. }
  1490. static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
  1491. void *context, int vl, int mode, u64 data)
  1492. {
  1493. struct hfi1_devdata *dd = context;
  1494. return dd->verbs_dev.n_txwait;
  1495. }
  1496. static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
  1497. void *context, int vl, int mode, u64 data)
  1498. {
  1499. struct hfi1_devdata *dd = context;
  1500. return dd->verbs_dev.n_kmem_wait;
  1501. }
  1502. static u64 access_sw_send_schedule(const struct cntr_entry *entry,
  1503. void *context, int vl, int mode, u64 data)
  1504. {
  1505. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1506. return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
  1507. mode, data);
  1508. }
  1509. /* Software counters for the error status bits within MISC_ERR_STATUS */
  1510. static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
  1511. void *context, int vl, int mode,
  1512. u64 data)
  1513. {
  1514. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1515. return dd->misc_err_status_cnt[12];
  1516. }
  1517. static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
  1518. void *context, int vl, int mode,
  1519. u64 data)
  1520. {
  1521. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1522. return dd->misc_err_status_cnt[11];
  1523. }
  1524. static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
  1525. void *context, int vl, int mode,
  1526. u64 data)
  1527. {
  1528. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1529. return dd->misc_err_status_cnt[10];
  1530. }
  1531. static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
  1532. void *context, int vl,
  1533. int mode, u64 data)
  1534. {
  1535. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1536. return dd->misc_err_status_cnt[9];
  1537. }
  1538. static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
  1539. void *context, int vl, int mode,
  1540. u64 data)
  1541. {
  1542. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1543. return dd->misc_err_status_cnt[8];
  1544. }
  1545. static u64 access_misc_efuse_read_bad_addr_err_cnt(
  1546. const struct cntr_entry *entry,
  1547. void *context, int vl, int mode, u64 data)
  1548. {
  1549. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1550. return dd->misc_err_status_cnt[7];
  1551. }
  1552. static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
  1553. void *context, int vl,
  1554. int mode, u64 data)
  1555. {
  1556. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1557. return dd->misc_err_status_cnt[6];
  1558. }
  1559. static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
  1560. void *context, int vl, int mode,
  1561. u64 data)
  1562. {
  1563. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1564. return dd->misc_err_status_cnt[5];
  1565. }
  1566. static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
  1567. void *context, int vl, int mode,
  1568. u64 data)
  1569. {
  1570. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1571. return dd->misc_err_status_cnt[4];
  1572. }
  1573. static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
  1574. void *context, int vl,
  1575. int mode, u64 data)
  1576. {
  1577. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1578. return dd->misc_err_status_cnt[3];
  1579. }
  1580. static u64 access_misc_csr_write_bad_addr_err_cnt(
  1581. const struct cntr_entry *entry,
  1582. void *context, int vl, int mode, u64 data)
  1583. {
  1584. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1585. return dd->misc_err_status_cnt[2];
  1586. }
  1587. static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1588. void *context, int vl,
  1589. int mode, u64 data)
  1590. {
  1591. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1592. return dd->misc_err_status_cnt[1];
  1593. }
  1594. static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
  1595. void *context, int vl, int mode,
  1596. u64 data)
  1597. {
  1598. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1599. return dd->misc_err_status_cnt[0];
  1600. }
  1601. /*
  1602. * Software counter for the aggregate of
  1603. * individual CceErrStatus counters
  1604. */
  1605. static u64 access_sw_cce_err_status_aggregated_cnt(
  1606. const struct cntr_entry *entry,
  1607. void *context, int vl, int mode, u64 data)
  1608. {
  1609. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1610. return dd->sw_cce_err_status_aggregate;
  1611. }
  1612. /*
  1613. * Software counters corresponding to each of the
  1614. * error status bits within CceErrStatus
  1615. */
  1616. static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
  1617. void *context, int vl, int mode,
  1618. u64 data)
  1619. {
  1620. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1621. return dd->cce_err_status_cnt[40];
  1622. }
  1623. static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
  1624. void *context, int vl, int mode,
  1625. u64 data)
  1626. {
  1627. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1628. return dd->cce_err_status_cnt[39];
  1629. }
  1630. static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
  1631. void *context, int vl, int mode,
  1632. u64 data)
  1633. {
  1634. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1635. return dd->cce_err_status_cnt[38];
  1636. }
  1637. static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
  1638. void *context, int vl, int mode,
  1639. u64 data)
  1640. {
  1641. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1642. return dd->cce_err_status_cnt[37];
  1643. }
  1644. static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
  1645. void *context, int vl, int mode,
  1646. u64 data)
  1647. {
  1648. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1649. return dd->cce_err_status_cnt[36];
  1650. }
  1651. static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
  1652. const struct cntr_entry *entry,
  1653. void *context, int vl, int mode, u64 data)
  1654. {
  1655. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1656. return dd->cce_err_status_cnt[35];
  1657. }
  1658. static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
  1659. const struct cntr_entry *entry,
  1660. void *context, int vl, int mode, u64 data)
  1661. {
  1662. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1663. return dd->cce_err_status_cnt[34];
  1664. }
  1665. static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1666. void *context, int vl,
  1667. int mode, u64 data)
  1668. {
  1669. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1670. return dd->cce_err_status_cnt[33];
  1671. }
  1672. static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1673. void *context, int vl, int mode,
  1674. u64 data)
  1675. {
  1676. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1677. return dd->cce_err_status_cnt[32];
  1678. }
  1679. static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
  1680. void *context, int vl, int mode, u64 data)
  1681. {
  1682. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1683. return dd->cce_err_status_cnt[31];
  1684. }
  1685. static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
  1686. void *context, int vl, int mode,
  1687. u64 data)
  1688. {
  1689. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1690. return dd->cce_err_status_cnt[30];
  1691. }
  1692. static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
  1693. void *context, int vl, int mode,
  1694. u64 data)
  1695. {
  1696. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1697. return dd->cce_err_status_cnt[29];
  1698. }
  1699. static u64 access_pcic_transmit_back_parity_err_cnt(
  1700. const struct cntr_entry *entry,
  1701. void *context, int vl, int mode, u64 data)
  1702. {
  1703. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1704. return dd->cce_err_status_cnt[28];
  1705. }
  1706. static u64 access_pcic_transmit_front_parity_err_cnt(
  1707. const struct cntr_entry *entry,
  1708. void *context, int vl, int mode, u64 data)
  1709. {
  1710. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1711. return dd->cce_err_status_cnt[27];
  1712. }
  1713. static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
  1714. void *context, int vl, int mode,
  1715. u64 data)
  1716. {
  1717. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1718. return dd->cce_err_status_cnt[26];
  1719. }
  1720. static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
  1721. void *context, int vl, int mode,
  1722. u64 data)
  1723. {
  1724. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1725. return dd->cce_err_status_cnt[25];
  1726. }
  1727. static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
  1728. void *context, int vl, int mode,
  1729. u64 data)
  1730. {
  1731. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1732. return dd->cce_err_status_cnt[24];
  1733. }
  1734. static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
  1735. void *context, int vl, int mode,
  1736. u64 data)
  1737. {
  1738. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1739. return dd->cce_err_status_cnt[23];
  1740. }
  1741. static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
  1742. void *context, int vl,
  1743. int mode, u64 data)
  1744. {
  1745. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1746. return dd->cce_err_status_cnt[22];
  1747. }
  1748. static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
  1749. void *context, int vl, int mode,
  1750. u64 data)
  1751. {
  1752. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1753. return dd->cce_err_status_cnt[21];
  1754. }
  1755. static u64 access_pcic_n_post_dat_q_parity_err_cnt(
  1756. const struct cntr_entry *entry,
  1757. void *context, int vl, int mode, u64 data)
  1758. {
  1759. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1760. return dd->cce_err_status_cnt[20];
  1761. }
  1762. static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
  1763. void *context, int vl,
  1764. int mode, u64 data)
  1765. {
  1766. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1767. return dd->cce_err_status_cnt[19];
  1768. }
  1769. static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
  1770. void *context, int vl, int mode,
  1771. u64 data)
  1772. {
  1773. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1774. return dd->cce_err_status_cnt[18];
  1775. }
  1776. static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
  1777. void *context, int vl, int mode,
  1778. u64 data)
  1779. {
  1780. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1781. return dd->cce_err_status_cnt[17];
  1782. }
  1783. static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
  1784. void *context, int vl, int mode,
  1785. u64 data)
  1786. {
  1787. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1788. return dd->cce_err_status_cnt[16];
  1789. }
  1790. static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
  1791. void *context, int vl, int mode,
  1792. u64 data)
  1793. {
  1794. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1795. return dd->cce_err_status_cnt[15];
  1796. }
  1797. static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
  1798. void *context, int vl,
  1799. int mode, u64 data)
  1800. {
  1801. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1802. return dd->cce_err_status_cnt[14];
  1803. }
  1804. static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
  1805. void *context, int vl, int mode,
  1806. u64 data)
  1807. {
  1808. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1809. return dd->cce_err_status_cnt[13];
  1810. }
  1811. static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
  1812. const struct cntr_entry *entry,
  1813. void *context, int vl, int mode, u64 data)
  1814. {
  1815. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1816. return dd->cce_err_status_cnt[12];
  1817. }
  1818. static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
  1819. const struct cntr_entry *entry,
  1820. void *context, int vl, int mode, u64 data)
  1821. {
  1822. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1823. return dd->cce_err_status_cnt[11];
  1824. }
  1825. static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
  1826. const struct cntr_entry *entry,
  1827. void *context, int vl, int mode, u64 data)
  1828. {
  1829. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1830. return dd->cce_err_status_cnt[10];
  1831. }
  1832. static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
  1833. const struct cntr_entry *entry,
  1834. void *context, int vl, int mode, u64 data)
  1835. {
  1836. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1837. return dd->cce_err_status_cnt[9];
  1838. }
  1839. static u64 access_cce_cli2_async_fifo_parity_err_cnt(
  1840. const struct cntr_entry *entry,
  1841. void *context, int vl, int mode, u64 data)
  1842. {
  1843. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1844. return dd->cce_err_status_cnt[8];
  1845. }
  1846. static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
  1847. void *context, int vl,
  1848. int mode, u64 data)
  1849. {
  1850. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1851. return dd->cce_err_status_cnt[7];
  1852. }
  1853. static u64 access_cce_cli0_async_fifo_parity_err_cnt(
  1854. const struct cntr_entry *entry,
  1855. void *context, int vl, int mode, u64 data)
  1856. {
  1857. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1858. return dd->cce_err_status_cnt[6];
  1859. }
  1860. static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
  1861. void *context, int vl, int mode,
  1862. u64 data)
  1863. {
  1864. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1865. return dd->cce_err_status_cnt[5];
  1866. }
  1867. static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
  1868. void *context, int vl, int mode,
  1869. u64 data)
  1870. {
  1871. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1872. return dd->cce_err_status_cnt[4];
  1873. }
  1874. static u64 access_cce_trgt_async_fifo_parity_err_cnt(
  1875. const struct cntr_entry *entry,
  1876. void *context, int vl, int mode, u64 data)
  1877. {
  1878. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1879. return dd->cce_err_status_cnt[3];
  1880. }
  1881. static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1882. void *context, int vl,
  1883. int mode, u64 data)
  1884. {
  1885. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1886. return dd->cce_err_status_cnt[2];
  1887. }
  1888. static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1889. void *context, int vl,
  1890. int mode, u64 data)
  1891. {
  1892. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1893. return dd->cce_err_status_cnt[1];
  1894. }
  1895. static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
  1896. void *context, int vl, int mode,
  1897. u64 data)
  1898. {
  1899. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1900. return dd->cce_err_status_cnt[0];
  1901. }
  1902. /*
  1903. * Software counters corresponding to each of the
  1904. * error status bits within RcvErrStatus
  1905. */
  1906. static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
  1907. void *context, int vl, int mode,
  1908. u64 data)
  1909. {
  1910. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1911. return dd->rcv_err_status_cnt[63];
  1912. }
  1913. static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1914. void *context, int vl,
  1915. int mode, u64 data)
  1916. {
  1917. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1918. return dd->rcv_err_status_cnt[62];
  1919. }
  1920. static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1921. void *context, int vl, int mode,
  1922. u64 data)
  1923. {
  1924. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1925. return dd->rcv_err_status_cnt[61];
  1926. }
  1927. static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
  1928. void *context, int vl, int mode,
  1929. u64 data)
  1930. {
  1931. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1932. return dd->rcv_err_status_cnt[60];
  1933. }
  1934. static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  1935. void *context, int vl,
  1936. int mode, u64 data)
  1937. {
  1938. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1939. return dd->rcv_err_status_cnt[59];
  1940. }
  1941. static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  1942. void *context, int vl,
  1943. int mode, u64 data)
  1944. {
  1945. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1946. return dd->rcv_err_status_cnt[58];
  1947. }
  1948. static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
  1949. void *context, int vl, int mode,
  1950. u64 data)
  1951. {
  1952. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1953. return dd->rcv_err_status_cnt[57];
  1954. }
  1955. static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
  1956. void *context, int vl, int mode,
  1957. u64 data)
  1958. {
  1959. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1960. return dd->rcv_err_status_cnt[56];
  1961. }
  1962. static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
  1963. void *context, int vl, int mode,
  1964. u64 data)
  1965. {
  1966. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1967. return dd->rcv_err_status_cnt[55];
  1968. }
  1969. static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
  1970. const struct cntr_entry *entry,
  1971. void *context, int vl, int mode, u64 data)
  1972. {
  1973. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1974. return dd->rcv_err_status_cnt[54];
  1975. }
  1976. static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
  1977. const struct cntr_entry *entry,
  1978. void *context, int vl, int mode, u64 data)
  1979. {
  1980. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1981. return dd->rcv_err_status_cnt[53];
  1982. }
  1983. static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
  1984. void *context, int vl,
  1985. int mode, u64 data)
  1986. {
  1987. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1988. return dd->rcv_err_status_cnt[52];
  1989. }
  1990. static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
  1991. void *context, int vl,
  1992. int mode, u64 data)
  1993. {
  1994. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1995. return dd->rcv_err_status_cnt[51];
  1996. }
  1997. static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
  1998. void *context, int vl,
  1999. int mode, u64 data)
  2000. {
  2001. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2002. return dd->rcv_err_status_cnt[50];
  2003. }
  2004. static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
  2005. void *context, int vl,
  2006. int mode, u64 data)
  2007. {
  2008. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2009. return dd->rcv_err_status_cnt[49];
  2010. }
  2011. static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
  2012. void *context, int vl,
  2013. int mode, u64 data)
  2014. {
  2015. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2016. return dd->rcv_err_status_cnt[48];
  2017. }
  2018. static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
  2019. void *context, int vl,
  2020. int mode, u64 data)
  2021. {
  2022. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2023. return dd->rcv_err_status_cnt[47];
  2024. }
  2025. static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
  2026. void *context, int vl, int mode,
  2027. u64 data)
  2028. {
  2029. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2030. return dd->rcv_err_status_cnt[46];
  2031. }
  2032. static u64 access_rx_hq_intr_csr_parity_err_cnt(
  2033. const struct cntr_entry *entry,
  2034. void *context, int vl, int mode, u64 data)
  2035. {
  2036. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2037. return dd->rcv_err_status_cnt[45];
  2038. }
  2039. static u64 access_rx_lookup_csr_parity_err_cnt(
  2040. const struct cntr_entry *entry,
  2041. void *context, int vl, int mode, u64 data)
  2042. {
  2043. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2044. return dd->rcv_err_status_cnt[44];
  2045. }
  2046. static u64 access_rx_lookup_rcv_array_cor_err_cnt(
  2047. const struct cntr_entry *entry,
  2048. void *context, int vl, int mode, u64 data)
  2049. {
  2050. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2051. return dd->rcv_err_status_cnt[43];
  2052. }
  2053. static u64 access_rx_lookup_rcv_array_unc_err_cnt(
  2054. const struct cntr_entry *entry,
  2055. void *context, int vl, int mode, u64 data)
  2056. {
  2057. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2058. return dd->rcv_err_status_cnt[42];
  2059. }
  2060. static u64 access_rx_lookup_des_part2_parity_err_cnt(
  2061. const struct cntr_entry *entry,
  2062. void *context, int vl, int mode, u64 data)
  2063. {
  2064. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2065. return dd->rcv_err_status_cnt[41];
  2066. }
  2067. static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
  2068. const struct cntr_entry *entry,
  2069. void *context, int vl, int mode, u64 data)
  2070. {
  2071. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2072. return dd->rcv_err_status_cnt[40];
  2073. }
  2074. static u64 access_rx_lookup_des_part1_unc_err_cnt(
  2075. const struct cntr_entry *entry,
  2076. void *context, int vl, int mode, u64 data)
  2077. {
  2078. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2079. return dd->rcv_err_status_cnt[39];
  2080. }
  2081. static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
  2082. const struct cntr_entry *entry,
  2083. void *context, int vl, int mode, u64 data)
  2084. {
  2085. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2086. return dd->rcv_err_status_cnt[38];
  2087. }
  2088. static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
  2089. const struct cntr_entry *entry,
  2090. void *context, int vl, int mode, u64 data)
  2091. {
  2092. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2093. return dd->rcv_err_status_cnt[37];
  2094. }
  2095. static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
  2096. const struct cntr_entry *entry,
  2097. void *context, int vl, int mode, u64 data)
  2098. {
  2099. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2100. return dd->rcv_err_status_cnt[36];
  2101. }
  2102. static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
  2103. const struct cntr_entry *entry,
  2104. void *context, int vl, int mode, u64 data)
  2105. {
  2106. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2107. return dd->rcv_err_status_cnt[35];
  2108. }
  2109. static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
  2110. const struct cntr_entry *entry,
  2111. void *context, int vl, int mode, u64 data)
  2112. {
  2113. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2114. return dd->rcv_err_status_cnt[34];
  2115. }
  2116. static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
  2117. const struct cntr_entry *entry,
  2118. void *context, int vl, int mode, u64 data)
  2119. {
  2120. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2121. return dd->rcv_err_status_cnt[33];
  2122. }
  2123. static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
  2124. void *context, int vl, int mode,
  2125. u64 data)
  2126. {
  2127. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2128. return dd->rcv_err_status_cnt[32];
  2129. }
  2130. static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
  2131. void *context, int vl, int mode,
  2132. u64 data)
  2133. {
  2134. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2135. return dd->rcv_err_status_cnt[31];
  2136. }
  2137. static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
  2138. void *context, int vl, int mode,
  2139. u64 data)
  2140. {
  2141. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2142. return dd->rcv_err_status_cnt[30];
  2143. }
  2144. static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
  2145. void *context, int vl, int mode,
  2146. u64 data)
  2147. {
  2148. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2149. return dd->rcv_err_status_cnt[29];
  2150. }
  2151. static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
  2152. void *context, int vl,
  2153. int mode, u64 data)
  2154. {
  2155. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2156. return dd->rcv_err_status_cnt[28];
  2157. }
  2158. static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
  2159. const struct cntr_entry *entry,
  2160. void *context, int vl, int mode, u64 data)
  2161. {
  2162. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2163. return dd->rcv_err_status_cnt[27];
  2164. }
  2165. static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
  2166. const struct cntr_entry *entry,
  2167. void *context, int vl, int mode, u64 data)
  2168. {
  2169. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2170. return dd->rcv_err_status_cnt[26];
  2171. }
  2172. static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
  2173. const struct cntr_entry *entry,
  2174. void *context, int vl, int mode, u64 data)
  2175. {
  2176. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2177. return dd->rcv_err_status_cnt[25];
  2178. }
  2179. static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
  2180. const struct cntr_entry *entry,
  2181. void *context, int vl, int mode, u64 data)
  2182. {
  2183. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2184. return dd->rcv_err_status_cnt[24];
  2185. }
  2186. static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
  2187. const struct cntr_entry *entry,
  2188. void *context, int vl, int mode, u64 data)
  2189. {
  2190. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2191. return dd->rcv_err_status_cnt[23];
  2192. }
  2193. static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
  2194. const struct cntr_entry *entry,
  2195. void *context, int vl, int mode, u64 data)
  2196. {
  2197. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2198. return dd->rcv_err_status_cnt[22];
  2199. }
  2200. static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
  2201. const struct cntr_entry *entry,
  2202. void *context, int vl, int mode, u64 data)
  2203. {
  2204. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2205. return dd->rcv_err_status_cnt[21];
  2206. }
  2207. static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
  2208. const struct cntr_entry *entry,
  2209. void *context, int vl, int mode, u64 data)
  2210. {
  2211. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2212. return dd->rcv_err_status_cnt[20];
  2213. }
  2214. static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
  2215. const struct cntr_entry *entry,
  2216. void *context, int vl, int mode, u64 data)
  2217. {
  2218. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2219. return dd->rcv_err_status_cnt[19];
  2220. }
  2221. static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
  2222. void *context, int vl,
  2223. int mode, u64 data)
  2224. {
  2225. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2226. return dd->rcv_err_status_cnt[18];
  2227. }
  2228. static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
  2229. void *context, int vl,
  2230. int mode, u64 data)
  2231. {
  2232. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2233. return dd->rcv_err_status_cnt[17];
  2234. }
  2235. static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
  2236. const struct cntr_entry *entry,
  2237. void *context, int vl, int mode, u64 data)
  2238. {
  2239. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2240. return dd->rcv_err_status_cnt[16];
  2241. }
  2242. static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
  2243. const struct cntr_entry *entry,
  2244. void *context, int vl, int mode, u64 data)
  2245. {
  2246. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2247. return dd->rcv_err_status_cnt[15];
  2248. }
  2249. static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
  2250. void *context, int vl,
  2251. int mode, u64 data)
  2252. {
  2253. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2254. return dd->rcv_err_status_cnt[14];
  2255. }
  2256. static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
  2257. void *context, int vl,
  2258. int mode, u64 data)
  2259. {
  2260. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2261. return dd->rcv_err_status_cnt[13];
  2262. }
  2263. static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  2264. void *context, int vl, int mode,
  2265. u64 data)
  2266. {
  2267. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2268. return dd->rcv_err_status_cnt[12];
  2269. }
  2270. static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
  2271. void *context, int vl, int mode,
  2272. u64 data)
  2273. {
  2274. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2275. return dd->rcv_err_status_cnt[11];
  2276. }
  2277. static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
  2278. void *context, int vl, int mode,
  2279. u64 data)
  2280. {
  2281. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2282. return dd->rcv_err_status_cnt[10];
  2283. }
  2284. static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
  2285. void *context, int vl, int mode,
  2286. u64 data)
  2287. {
  2288. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2289. return dd->rcv_err_status_cnt[9];
  2290. }
  2291. static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
  2292. void *context, int vl, int mode,
  2293. u64 data)
  2294. {
  2295. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2296. return dd->rcv_err_status_cnt[8];
  2297. }
  2298. static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
  2299. const struct cntr_entry *entry,
  2300. void *context, int vl, int mode, u64 data)
  2301. {
  2302. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2303. return dd->rcv_err_status_cnt[7];
  2304. }
  2305. static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
  2306. const struct cntr_entry *entry,
  2307. void *context, int vl, int mode, u64 data)
  2308. {
  2309. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2310. return dd->rcv_err_status_cnt[6];
  2311. }
  2312. static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
  2313. void *context, int vl, int mode,
  2314. u64 data)
  2315. {
  2316. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2317. return dd->rcv_err_status_cnt[5];
  2318. }
  2319. static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
  2320. void *context, int vl, int mode,
  2321. u64 data)
  2322. {
  2323. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2324. return dd->rcv_err_status_cnt[4];
  2325. }
  2326. static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
  2327. void *context, int vl, int mode,
  2328. u64 data)
  2329. {
  2330. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2331. return dd->rcv_err_status_cnt[3];
  2332. }
  2333. static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
  2334. void *context, int vl, int mode,
  2335. u64 data)
  2336. {
  2337. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2338. return dd->rcv_err_status_cnt[2];
  2339. }
  2340. static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
  2341. void *context, int vl, int mode,
  2342. u64 data)
  2343. {
  2344. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2345. return dd->rcv_err_status_cnt[1];
  2346. }
  2347. static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
  2348. void *context, int vl, int mode,
  2349. u64 data)
  2350. {
  2351. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2352. return dd->rcv_err_status_cnt[0];
  2353. }
  2354. /*
  2355. * Software counters corresponding to each of the
  2356. * error status bits within SendPioErrStatus
  2357. */
static u64 access_pio_pec_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}
static u64 access_pio_pcc_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}
static u64 access_pio_last_returned_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}
static u64 access_pio_current_free_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}
static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}
static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}
static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}
static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}
static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}
static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}
static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}
static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}
static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}
static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}
static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}
static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}
static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}
static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}
static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}
static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}
static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}
static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}
static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}
static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}
static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}
static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}
static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}
static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}
static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}
static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}
static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}
static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}
static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}
static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}
static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}
static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
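/* Same convention as above: index N corresponds to SendDmaErrStatus bit N. */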
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}
static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}
static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}
static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
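/*
 * Same convention: send_egress_err_status_cnt[N] shadows bit N of
 * SendEgressErrStatus (bits 63..0 below).
 */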
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}
static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}
static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}
static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}
static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}
static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}
static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}
static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}
static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}
static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}
static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}
static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}
static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}
static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}
static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}
static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}
static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}
static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}
static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}
static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}
static u64 access_tx_read_sdma_memory_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}
static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}
static u64 access_tx_credit_return_partiy_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}
static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}
static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}
static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}
static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}
static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}
static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}
static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}
static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}
static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}
static u64 access_tx_sdma15_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}
static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}
static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}
static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}
static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}
static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}
static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}
static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}
static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}
static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}
static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}
static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}
static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}
static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}
static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}
static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}
static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}
static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}
static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}
static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}
static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}
static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}
static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}
static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}
static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}
static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}
static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}
static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}
static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}
static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}
static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}
static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
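/* Same convention: send_err_status_cnt[N] shadows SendErrStatus bit N. */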
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}
static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}
static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
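/* Same convention: sw_ctxt_err_status_cnt[N] shadows SendCtxtErrStatus bit N. */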
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}
static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}
static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}
static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}
static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
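/*
 * Same convention: sw_send_dma_eng_err_status_cnt[N] shadows bit N of
 * SendDmaEngErrStatus (bits 23..0 below).
 */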
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}
static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}
static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}
static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}
static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}
static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}
static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}
static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}
static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}
static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}
static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}
static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}
static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}
static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}
static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}
static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}
static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl,
				int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}
static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}
static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}
static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}
static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}
static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}
static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}
static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
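/*
 * Unlike the pure software counters above, access_dc_rcv_err_cnt()
 * reads the CSR named by entry->csr (DCC_ERR_PORTRCV_ERR_CNT in the
 * dev_cntrs table below).  On a read it folds in the software-tracked
 * bypass packet errors, saturating at CNTR_MAX; a write clears the
 * software portion.
 */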
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				void *context, int vl, int mode,
				u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}
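/*
 * Illustration of the macro above: def_access_sw_cpu(rc_acks) defines
 * access_sw_cpu_rc_acks(), which hands the per-port counter
 * ppd->ibport_data.rvp.rc_acks (and its companion z_rc_acks value) to
 * read_write_cpu() for the requested vl/mode/data.
 */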
def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	\
	if (vl != CNTR_INVALID_VL) \
		return 0; \
	\
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}
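/*
 * Illustration: def_access_ibp_counter(loop_pkts) defines
 * access_ibp_loop_pkts(), which returns 0 for any specific VL (these
 * IB port counters are not kept per VL) and otherwise forwards
 * ppd->ibport_data.rvp.n_loop_pkts to read_write_sw().
 */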
def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
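/*
 * Device counter table.  The RXE32_/CCE_/DC_PERF_ helper macros are
 * assumed to describe counters backed directly by a hardware CSR,
 * while CNTR_ELEM() entries with a CSR of 0 are software counters
 * serviced by the access_* callbacks defined above.
 */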
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			    access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt
				),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
  3802. CNTR_NORMAL,
  3803. access_rx_rbuf_next_free_buf_cor_err_cnt),
  3804. [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
  3805. CNTR_NORMAL,
  3806. access_rx_rbuf_next_free_buf_unc_err_cnt),
  3807. [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
  3808. "RxRbufFlInitWrAddrParityErr", 0, 0,
  3809. CNTR_NORMAL,
  3810. access_rbuf_fl_init_wr_addr_parity_err_cnt),
  3811. [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
  3812. 0, CNTR_NORMAL,
  3813. access_rx_rbuf_fl_initdone_parity_err_cnt),
  3814. [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
  3815. 0, CNTR_NORMAL,
  3816. access_rx_rbuf_fl_write_addr_parity_err_cnt),
  3817. [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
  3818. CNTR_NORMAL,
  3819. access_rx_rbuf_fl_rd_addr_parity_err_cnt),
  3820. [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
  3821. CNTR_NORMAL,
  3822. access_rx_rbuf_empty_err_cnt),
  3823. [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
  3824. CNTR_NORMAL,
  3825. access_rx_rbuf_full_err_cnt),
  3826. [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
  3827. CNTR_NORMAL,
  3828. access_rbuf_bad_lookup_err_cnt),
  3829. [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
  3830. CNTR_NORMAL,
  3831. access_rbuf_ctx_id_parity_err_cnt),
  3832. [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
  3833. CNTR_NORMAL,
  3834. access_rbuf_csr_qeopdw_parity_err_cnt),
  3835. [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
  3836. "RxRbufCsrQNumOfPktParityErr", 0, 0,
  3837. CNTR_NORMAL,
  3838. access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
  3839. [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
  3840. "RxRbufCsrQTlPtrParityErr", 0, 0,
  3841. CNTR_NORMAL,
  3842. access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
  3843. [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
  3844. 0, CNTR_NORMAL,
  3845. access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
  3846. [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
  3847. 0, CNTR_NORMAL,
  3848. access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
  3849. [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
  3850. 0, 0, CNTR_NORMAL,
  3851. access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
  3852. [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
  3853. 0, CNTR_NORMAL,
  3854. access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
  3855. [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
  3856. "RxRbufCsrQHeadBufNumParityErr", 0, 0,
  3857. CNTR_NORMAL,
  3858. access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
  3859. [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
  3860. 0, CNTR_NORMAL,
  3861. access_rx_rbuf_block_list_read_cor_err_cnt),
  3862. [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
  3863. 0, CNTR_NORMAL,
  3864. access_rx_rbuf_block_list_read_unc_err_cnt),
  3865. [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
  3866. CNTR_NORMAL,
  3867. access_rx_rbuf_lookup_des_cor_err_cnt),
  3868. [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
  3869. CNTR_NORMAL,
  3870. access_rx_rbuf_lookup_des_unc_err_cnt),
  3871. [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
  3872. "RxRbufLookupDesRegUncCorErr", 0, 0,
  3873. CNTR_NORMAL,
  3874. access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
  3875. [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
  3876. CNTR_NORMAL,
  3877. access_rx_rbuf_lookup_des_reg_unc_err_cnt),
  3878. [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
  3879. CNTR_NORMAL,
  3880. access_rx_rbuf_free_list_cor_err_cnt),
  3881. [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
  3882. CNTR_NORMAL,
  3883. access_rx_rbuf_free_list_unc_err_cnt),
  3884. [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
  3885. CNTR_NORMAL,
  3886. access_rx_rcv_fsm_encoding_err_cnt),
  3887. [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
  3888. CNTR_NORMAL,
  3889. access_rx_dma_flag_cor_err_cnt),
  3890. [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
  3891. CNTR_NORMAL,
  3892. access_rx_dma_flag_unc_err_cnt),
  3893. [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
  3894. CNTR_NORMAL,
  3895. access_rx_dc_sop_eop_parity_err_cnt),
  3896. [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
  3897. CNTR_NORMAL,
  3898. access_rx_rcv_csr_parity_err_cnt),
  3899. [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
  3900. CNTR_NORMAL,
  3901. access_rx_rcv_qp_map_table_cor_err_cnt),
  3902. [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
  3903. CNTR_NORMAL,
  3904. access_rx_rcv_qp_map_table_unc_err_cnt),
  3905. [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
  3906. CNTR_NORMAL,
  3907. access_rx_rcv_data_cor_err_cnt),
  3908. [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
  3909. CNTR_NORMAL,
  3910. access_rx_rcv_data_unc_err_cnt),
  3911. [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
  3912. CNTR_NORMAL,
  3913. access_rx_rcv_hdr_cor_err_cnt),
  3914. [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
  3915. CNTR_NORMAL,
  3916. access_rx_rcv_hdr_unc_err_cnt),
  3917. [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
  3918. CNTR_NORMAL,
  3919. access_rx_dc_intf_parity_err_cnt),
  3920. [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
  3921. CNTR_NORMAL,
  3922. access_rx_dma_csr_cor_err_cnt),
  3923. /* SendPioErrStatus */
  3924. [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
  3925. CNTR_NORMAL,
  3926. access_pio_pec_sop_head_parity_err_cnt),
  3927. [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
  3928. CNTR_NORMAL,
  3929. access_pio_pcc_sop_head_parity_err_cnt),
  3930. [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
  3931. 0, 0, CNTR_NORMAL,
  3932. access_pio_last_returned_cnt_parity_err_cnt),
  3933. [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
  3934. 0, CNTR_NORMAL,
  3935. access_pio_current_free_cnt_parity_err_cnt),
  3936. [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
  3937. CNTR_NORMAL,
  3938. access_pio_reserved_31_err_cnt),
  3939. [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
  3940. CNTR_NORMAL,
  3941. access_pio_reserved_30_err_cnt),
  3942. [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
  3943. CNTR_NORMAL,
  3944. access_pio_ppmc_sop_len_err_cnt),
  3945. [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
  3946. CNTR_NORMAL,
  3947. access_pio_ppmc_bqc_mem_parity_err_cnt),
  3948. [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
  3949. CNTR_NORMAL,
  3950. access_pio_vl_fifo_parity_err_cnt),
  3951. [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
  3952. CNTR_NORMAL,
  3953. access_pio_vlf_sop_parity_err_cnt),
  3954. [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
  3955. CNTR_NORMAL,
  3956. access_pio_vlf_v1_len_parity_err_cnt),
  3957. [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
  3958. CNTR_NORMAL,
  3959. access_pio_block_qw_count_parity_err_cnt),
  3960. [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
  3961. CNTR_NORMAL,
  3962. access_pio_write_qw_valid_parity_err_cnt),
  3963. [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
  3964. CNTR_NORMAL,
  3965. access_pio_state_machine_err_cnt),
  3966. [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
  3967. CNTR_NORMAL,
  3968. access_pio_write_data_parity_err_cnt),
  3969. [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
  3970. CNTR_NORMAL,
  3971. access_pio_host_addr_mem_cor_err_cnt),
  3972. [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
  3973. CNTR_NORMAL,
  3974. access_pio_host_addr_mem_unc_err_cnt),
  3975. [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
  3976. CNTR_NORMAL,
  3977. access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
  3978. [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
  3979. CNTR_NORMAL,
  3980. access_pio_init_sm_in_err_cnt),
  3981. [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
  3982. CNTR_NORMAL,
  3983. access_pio_ppmc_pbl_fifo_err_cnt),
  3984. [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
  3985. 0, CNTR_NORMAL,
  3986. access_pio_credit_ret_fifo_parity_err_cnt),
  3987. [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
  3988. CNTR_NORMAL,
  3989. access_pio_v1_len_mem_bank1_cor_err_cnt),
  3990. [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
  3991. CNTR_NORMAL,
  3992. access_pio_v1_len_mem_bank0_cor_err_cnt),
  3993. [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
  3994. CNTR_NORMAL,
  3995. access_pio_v1_len_mem_bank1_unc_err_cnt),
  3996. [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
  3997. CNTR_NORMAL,
  3998. access_pio_v1_len_mem_bank0_unc_err_cnt),
  3999. [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
  4000. CNTR_NORMAL,
  4001. access_pio_sm_pkt_reset_parity_err_cnt),
  4002. [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
  4003. CNTR_NORMAL,
  4004. access_pio_pkt_evict_fifo_parity_err_cnt),
  4005. [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
  4006. "PioSbrdctrlCrrelFifoParityErr", 0, 0,
  4007. CNTR_NORMAL,
  4008. access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
  4009. [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
  4010. CNTR_NORMAL,
  4011. access_pio_sbrdctl_crrel_parity_err_cnt),
  4012. [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
  4013. CNTR_NORMAL,
  4014. access_pio_pec_fifo_parity_err_cnt),
  4015. [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
  4016. CNTR_NORMAL,
  4017. access_pio_pcc_fifo_parity_err_cnt),
  4018. [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
  4019. CNTR_NORMAL,
  4020. access_pio_sb_mem_fifo1_err_cnt),
  4021. [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
  4022. CNTR_NORMAL,
  4023. access_pio_sb_mem_fifo0_err_cnt),
  4024. [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
  4025. CNTR_NORMAL,
  4026. access_pio_csr_parity_err_cnt),
  4027. [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
  4028. CNTR_NORMAL,
  4029. access_pio_write_addr_parity_err_cnt),
  4030. [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
  4031. CNTR_NORMAL,
  4032. access_pio_write_bad_ctxt_err_cnt),
  4033. /* SendDmaErrStatus */
  4034. [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
  4035. 0, CNTR_NORMAL,
  4036. access_sdma_pcie_req_tracking_cor_err_cnt),
  4037. [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
  4038. 0, CNTR_NORMAL,
  4039. access_sdma_pcie_req_tracking_unc_err_cnt),
  4040. [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
  4041. CNTR_NORMAL,
  4042. access_sdma_csr_parity_err_cnt),
  4043. [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
  4044. CNTR_NORMAL,
  4045. access_sdma_rpy_tag_err_cnt),
  4046. /* SendEgressErrStatus */
  4047. [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
  4048. CNTR_NORMAL,
  4049. access_tx_read_pio_memory_csr_unc_err_cnt),
  4050. [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
  4051. 0, CNTR_NORMAL,
  4052. access_tx_read_sdma_memory_csr_err_cnt),
  4053. [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
  4054. CNTR_NORMAL,
  4055. access_tx_egress_fifo_cor_err_cnt),
  4056. [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
  4057. CNTR_NORMAL,
  4058. access_tx_read_pio_memory_cor_err_cnt),
  4059. [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
  4060. CNTR_NORMAL,
  4061. access_tx_read_sdma_memory_cor_err_cnt),
  4062. [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
  4063. CNTR_NORMAL,
  4064. access_tx_sb_hdr_cor_err_cnt),
  4065. [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
  4066. CNTR_NORMAL,
  4067. access_tx_credit_overrun_err_cnt),
  4068. [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
  4069. CNTR_NORMAL,
  4070. access_tx_launch_fifo8_cor_err_cnt),
  4071. [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
  4072. CNTR_NORMAL,
  4073. access_tx_launch_fifo7_cor_err_cnt),
  4074. [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
  4075. CNTR_NORMAL,
  4076. access_tx_launch_fifo6_cor_err_cnt),
  4077. [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
  4078. CNTR_NORMAL,
  4079. access_tx_launch_fifo5_cor_err_cnt),
  4080. [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
  4081. CNTR_NORMAL,
  4082. access_tx_launch_fifo4_cor_err_cnt),
  4083. [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
  4084. CNTR_NORMAL,
  4085. access_tx_launch_fifo3_cor_err_cnt),
  4086. [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
  4087. CNTR_NORMAL,
  4088. access_tx_launch_fifo2_cor_err_cnt),
  4089. [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
  4090. CNTR_NORMAL,
  4091. access_tx_launch_fifo1_cor_err_cnt),
  4092. [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
  4093. CNTR_NORMAL,
  4094. access_tx_launch_fifo0_cor_err_cnt),
  4095. [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
  4096. CNTR_NORMAL,
  4097. access_tx_credit_return_vl_err_cnt),
  4098. [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
  4099. CNTR_NORMAL,
  4100. access_tx_hcrc_insertion_err_cnt),
  4101. [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
  4102. CNTR_NORMAL,
  4103. access_tx_egress_fifo_unc_err_cnt),
  4104. [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
  4105. CNTR_NORMAL,
  4106. access_tx_read_pio_memory_unc_err_cnt),
  4107. [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
  4108. CNTR_NORMAL,
  4109. access_tx_read_sdma_memory_unc_err_cnt),
  4110. [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
  4111. CNTR_NORMAL,
  4112. access_tx_sb_hdr_unc_err_cnt),
  4113. [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
  4114. CNTR_NORMAL,
  4115. access_tx_credit_return_partiy_err_cnt),
  4116. [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
  4117. 0, 0, CNTR_NORMAL,
  4118. access_tx_launch_fifo8_unc_or_parity_err_cnt),
  4119. [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
  4120. 0, 0, CNTR_NORMAL,
  4121. access_tx_launch_fifo7_unc_or_parity_err_cnt),
  4122. [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
  4123. 0, 0, CNTR_NORMAL,
  4124. access_tx_launch_fifo6_unc_or_parity_err_cnt),
  4125. [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
  4126. 0, 0, CNTR_NORMAL,
  4127. access_tx_launch_fifo5_unc_or_parity_err_cnt),
  4128. [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
  4129. 0, 0, CNTR_NORMAL,
  4130. access_tx_launch_fifo4_unc_or_parity_err_cnt),
  4131. [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
  4132. 0, 0, CNTR_NORMAL,
  4133. access_tx_launch_fifo3_unc_or_parity_err_cnt),
  4134. [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
  4135. 0, 0, CNTR_NORMAL,
  4136. access_tx_launch_fifo2_unc_or_parity_err_cnt),
  4137. [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
  4138. 0, 0, CNTR_NORMAL,
  4139. access_tx_launch_fifo1_unc_or_parity_err_cnt),
  4140. [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
  4141. 0, 0, CNTR_NORMAL,
  4142. access_tx_launch_fifo0_unc_or_parity_err_cnt),
  4143. [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
  4144. 0, 0, CNTR_NORMAL,
  4145. access_tx_sdma15_disallowed_packet_err_cnt),
  4146. [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
  4147. 0, 0, CNTR_NORMAL,
  4148. access_tx_sdma14_disallowed_packet_err_cnt),
  4149. [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
  4150. 0, 0, CNTR_NORMAL,
  4151. access_tx_sdma13_disallowed_packet_err_cnt),
  4152. [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
  4153. 0, 0, CNTR_NORMAL,
  4154. access_tx_sdma12_disallowed_packet_err_cnt),
  4155. [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
  4156. 0, 0, CNTR_NORMAL,
  4157. access_tx_sdma11_disallowed_packet_err_cnt),
  4158. [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
  4159. 0, 0, CNTR_NORMAL,
  4160. access_tx_sdma10_disallowed_packet_err_cnt),
  4161. [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
  4162. 0, 0, CNTR_NORMAL,
  4163. access_tx_sdma9_disallowed_packet_err_cnt),
  4164. [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
  4165. 0, 0, CNTR_NORMAL,
  4166. access_tx_sdma8_disallowed_packet_err_cnt),
  4167. [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
  4168. 0, 0, CNTR_NORMAL,
  4169. access_tx_sdma7_disallowed_packet_err_cnt),
  4170. [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
  4171. 0, 0, CNTR_NORMAL,
  4172. access_tx_sdma6_disallowed_packet_err_cnt),
  4173. [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
  4174. 0, 0, CNTR_NORMAL,
  4175. access_tx_sdma5_disallowed_packet_err_cnt),
  4176. [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
  4177. 0, 0, CNTR_NORMAL,
  4178. access_tx_sdma4_disallowed_packet_err_cnt),
  4179. [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
  4180. 0, 0, CNTR_NORMAL,
  4181. access_tx_sdma3_disallowed_packet_err_cnt),
  4182. [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
  4183. 0, 0, CNTR_NORMAL,
  4184. access_tx_sdma2_disallowed_packet_err_cnt),
  4185. [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
  4186. 0, 0, CNTR_NORMAL,
  4187. access_tx_sdma1_disallowed_packet_err_cnt),
  4188. [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
  4189. 0, 0, CNTR_NORMAL,
  4190. access_tx_sdma0_disallowed_packet_err_cnt),
  4191. [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
  4192. CNTR_NORMAL,
  4193. access_tx_config_parity_err_cnt),
  4194. [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
  4195. CNTR_NORMAL,
  4196. access_tx_sbrd_ctl_csr_parity_err_cnt),
  4197. [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
  4198. CNTR_NORMAL,
  4199. access_tx_launch_csr_parity_err_cnt),
  4200. [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
  4201. CNTR_NORMAL,
  4202. access_tx_illegal_vl_err_cnt),
  4203. [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
  4204. "TxSbrdCtlStateMachineParityErr", 0, 0,
  4205. CNTR_NORMAL,
  4206. access_tx_sbrd_ctl_state_machine_parity_err_cnt),
  4207. [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
  4208. CNTR_NORMAL,
  4209. access_egress_reserved_10_err_cnt),
  4210. [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
  4211. CNTR_NORMAL,
  4212. access_egress_reserved_9_err_cnt),
  4213. [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
  4214. 0, 0, CNTR_NORMAL,
  4215. access_tx_sdma_launch_intf_parity_err_cnt),
  4216. [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
  4217. CNTR_NORMAL,
  4218. access_tx_pio_launch_intf_parity_err_cnt),
  4219. [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
  4220. CNTR_NORMAL,
  4221. access_egress_reserved_6_err_cnt),
  4222. [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
  4223. CNTR_NORMAL,
  4224. access_tx_incorrect_link_state_err_cnt),
  4225. [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
  4226. CNTR_NORMAL,
  4227. access_tx_linkdown_err_cnt),
  4228. [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
  4229. "EgressFifoUnderrunOrParityErr", 0, 0,
  4230. CNTR_NORMAL,
  4231. access_tx_egress_fifi_underrun_or_parity_err_cnt),
  4232. [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
  4233. CNTR_NORMAL,
  4234. access_egress_reserved_2_err_cnt),
  4235. [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
  4236. CNTR_NORMAL,
  4237. access_tx_pkt_integrity_mem_unc_err_cnt),
  4238. [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
  4239. CNTR_NORMAL,
  4240. access_tx_pkt_integrity_mem_cor_err_cnt),
  4241. /* SendErrStatus */
  4242. [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
  4243. CNTR_NORMAL,
  4244. access_send_csr_write_bad_addr_err_cnt),
  4245. [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
  4246. CNTR_NORMAL,
  4247. access_send_csr_read_bad_addr_err_cnt),
  4248. [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
  4249. CNTR_NORMAL,
  4250. access_send_csr_parity_cnt),
  4251. /* SendCtxtErrStatus */
  4252. [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
  4253. CNTR_NORMAL,
  4254. access_pio_write_out_of_bounds_err_cnt),
  4255. [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
  4256. CNTR_NORMAL,
  4257. access_pio_write_overflow_err_cnt),
  4258. [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
  4259. 0, 0, CNTR_NORMAL,
  4260. access_pio_write_crosses_boundary_err_cnt),
  4261. [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
  4262. CNTR_NORMAL,
  4263. access_pio_disallowed_packet_err_cnt),
  4264. [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
  4265. CNTR_NORMAL,
  4266. access_pio_inconsistent_sop_err_cnt),
  4267. /* SendDmaEngErrStatus */
  4268. [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
  4269. 0, 0, CNTR_NORMAL,
  4270. access_sdma_header_request_fifo_cor_err_cnt),
  4271. [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
  4272. CNTR_NORMAL,
  4273. access_sdma_header_storage_cor_err_cnt),
  4274. [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
  4275. CNTR_NORMAL,
  4276. access_sdma_packet_tracking_cor_err_cnt),
  4277. [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
  4278. CNTR_NORMAL,
  4279. access_sdma_assembly_cor_err_cnt),
  4280. [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
  4281. CNTR_NORMAL,
  4282. access_sdma_desc_table_cor_err_cnt),
  4283. [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
  4284. 0, 0, CNTR_NORMAL,
  4285. access_sdma_header_request_fifo_unc_err_cnt),
  4286. [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
  4287. CNTR_NORMAL,
  4288. access_sdma_header_storage_unc_err_cnt),
  4289. [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
  4290. CNTR_NORMAL,
  4291. access_sdma_packet_tracking_unc_err_cnt),
  4292. [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
  4293. CNTR_NORMAL,
  4294. access_sdma_assembly_unc_err_cnt),
  4295. [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
  4296. CNTR_NORMAL,
  4297. access_sdma_desc_table_unc_err_cnt),
  4298. [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
  4299. CNTR_NORMAL,
  4300. access_sdma_timeout_err_cnt),
  4301. [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
  4302. CNTR_NORMAL,
  4303. access_sdma_header_length_err_cnt),
  4304. [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
  4305. CNTR_NORMAL,
  4306. access_sdma_header_address_err_cnt),
  4307. [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
  4308. CNTR_NORMAL,
  4309. access_sdma_header_select_err_cnt),
  4310. [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
  4311. CNTR_NORMAL,
  4312. access_sdma_reserved_9_err_cnt),
  4313. [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
  4314. CNTR_NORMAL,
  4315. access_sdma_packet_desc_overflow_err_cnt),
  4316. [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
  4317. CNTR_NORMAL,
  4318. access_sdma_length_mismatch_err_cnt),
  4319. [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
  4320. CNTR_NORMAL,
  4321. access_sdma_halt_err_cnt),
  4322. [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
  4323. CNTR_NORMAL,
  4324. access_sdma_mem_read_err_cnt),
  4325. [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
  4326. CNTR_NORMAL,
  4327. access_sdma_first_desc_err_cnt),
  4328. [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
  4329. CNTR_NORMAL,
  4330. access_sdma_tail_out_of_bounds_err_cnt),
  4331. [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
  4332. CNTR_NORMAL,
  4333. access_sdma_too_long_err_cnt),
  4334. [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
  4335. CNTR_NORMAL,
  4336. access_sdma_gen_mismatch_err_cnt),
  4337. [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
  4338. CNTR_NORMAL,
  4339. access_sdma_wrong_dw_err_cnt),
  4340. };
  4341. static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
  4342. [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
  4343. CNTR_NORMAL),
  4344. [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
  4345. CNTR_NORMAL),
  4346. [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
  4347. CNTR_NORMAL),
  4348. [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
  4349. CNTR_NORMAL),
  4350. [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
  4351. CNTR_NORMAL),
  4352. [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
  4353. CNTR_NORMAL),
  4354. [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
  4355. CNTR_NORMAL),
  4356. [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
  4357. [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
  4358. [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
  4359. [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
  4360. CNTR_SYNTH | CNTR_VL),
  4361. [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
  4362. CNTR_SYNTH | CNTR_VL),
  4363. [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
  4364. CNTR_SYNTH | CNTR_VL),
  4365. [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
  4366. [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
  4367. [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4368. access_sw_link_dn_cnt),
  4369. [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4370. access_sw_link_up_cnt),
  4371. [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
  4372. access_sw_unknown_frame_cnt),
  4373. [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4374. access_sw_xmit_discards),
  4375. [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
  4376. CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
  4377. access_sw_xmit_discards),
  4378. [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
  4379. access_xmit_constraint_errs),
  4380. [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
  4381. access_rcv_constraint_errs),
  4382. [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
  4383. [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
  4384. [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
  4385. [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
  4386. [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
  4387. [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
  4388. [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
  4389. [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
  4390. [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
  4391. [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
  4392. [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
  4393. [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
  4394. [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
  4395. access_sw_cpu_rc_acks),
  4396. [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
  4397. access_sw_cpu_rc_qacks),
  4398. [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
  4399. access_sw_cpu_rc_delayed_comp),
  4400. [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
  4401. [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
  4402. [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
  4403. [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
  4404. [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
  4405. [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
  4406. [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
  4407. [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
  4408. [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
  4409. [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
  4410. [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
  4411. [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
  4412. [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
  4413. [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
  4414. [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
  4415. [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
  4416. [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
  4417. [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
  4418. [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
  4419. [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
  4420. [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
  4421. [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
  4422. [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
  4423. [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
  4424. [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
  4425. [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
  4426. [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
  4427. [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
  4428. [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
  4429. [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
  4430. [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
  4431. [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
  4432. [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
  4433. [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
  4434. [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
  4435. [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
  4436. [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
  4437. [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
  4438. [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
  4439. [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
  4440. [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
  4441. [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
  4442. [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
  4443. [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
  4444. [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
  4445. [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
  4446. [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
  4447. [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
  4448. [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
  4449. [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
  4450. [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
  4451. [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
  4452. [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
  4453. [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
  4454. [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
  4455. [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
  4456. [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
  4457. [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
  4458. [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
  4459. [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
  4460. [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
  4461. [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
  4462. [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
  4463. [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
  4464. [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
  4465. [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
  4466. [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
  4467. [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
  4468. [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
  4469. [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
  4470. [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
  4471. [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
  4472. [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
  4473. [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
  4474. [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
  4475. [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
  4476. [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
  4477. [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
  4478. [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
  4479. [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
  4480. };
  4481. /* ======================================================================== */
/* return true if this is chip revision A */
  4483. int is_ax(struct hfi1_devdata *dd)
  4484. {
  4485. u8 chip_rev_minor =
  4486. dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
  4487. & CCE_REVISION_CHIP_REV_MINOR_MASK;
  4488. return (chip_rev_minor & 0xf0) == 0;
  4489. }
/* return true if this is chip revision B */
  4491. int is_bx(struct hfi1_devdata *dd)
  4492. {
  4493. u8 chip_rev_minor =
  4494. dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
  4495. & CCE_REVISION_CHIP_REV_MINOR_MASK;
  4496. return (chip_rev_minor & 0xF0) == 0x10;
  4497. }
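/*
 * For reference, both helpers above decode the minor chip revision from
 * CCE_REVISION and test its upper nibble: 0x0? indicates an A-step part
 * and 0x1? a B-step part.  A minimal, purely illustrative caller simply
 * branches on them:
 *
 *	if (is_ax(dd))
 *		dd_dev_info(dd, "A-step silicon\n");
 */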
/*
 * Append string s to buffer buf.  Arguments curp and lenp point to the
 * current position and the remaining length, respectively, and both are
 * updated before returning.
 *
 * Return 0 on success, 1 if out of room.
 */
  4504. static int append_str(char *buf, char **curp, int *lenp, const char *s)
  4505. {
  4506. char *p = *curp;
  4507. int len = *lenp;
  4508. int result = 0; /* success */
  4509. char c;
/* add a comma if this is not the first string in the buffer */
  4511. if (p != buf) {
  4512. if (len == 0) {
  4513. result = 1; /* out of room */
  4514. goto done;
  4515. }
  4516. *p++ = ',';
  4517. len--;
  4518. }
  4519. /* copy the string */
  4520. while ((c = *s++) != 0) {
  4521. if (len == 0) {
  4522. result = 1; /* out of room */
  4523. goto done;
  4524. }
  4525. *p++ = c;
  4526. len--;
  4527. }
  4528. done:
  4529. /* write return values */
  4530. *curp = p;
  4531. *lenp = len;
  4532. return result;
  4533. }
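/*
 * Usage sketch (illustrative only): flag_string() below calls this in a
 * loop to build up a name list, roughly
 *
 *	char *p = buf;
 *	int len = buf_len - 1;
 *
 *	append_str(buf, &p, &len, "First");
 *	append_str(buf, &p, &len, "Second");
 *
 * after which buf contains the characters "First,Second" (the caller adds
 * the trailing nul) and p/len have been advanced past what was written.
 */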
/*
 * Using the given flag table, print a comma-separated string of flag
 * names into the buffer.  The string ends in '*' if the buffer is too
 * short to hold everything.
 */
  4538. static char *flag_string(char *buf, int buf_len, u64 flags,
  4539. struct flag_table *table, int table_size)
  4540. {
  4541. char extra[32];
  4542. char *p = buf;
  4543. int len = buf_len;
  4544. int no_room = 0;
  4545. int i;
/* make sure there are at least 2 bytes so we can always form "*" and a nul */
  4547. if (len < 2)
  4548. return "";
  4549. len--; /* leave room for a nul */
  4550. for (i = 0; i < table_size; i++) {
  4551. if (flags & table[i].flag) {
  4552. no_room = append_str(buf, &p, &len, table[i].str);
  4553. if (no_room)
  4554. break;
  4555. flags &= ~table[i].flag;
  4556. }
  4557. }
  4558. /* any undocumented bits left? */
  4559. if (!no_room && flags) {
  4560. snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
  4561. no_room = append_str(buf, &p, &len, extra);
  4562. }
/* add '*' if we ran out of room */
  4564. if (no_room) {
  4565. /* may need to back up to add space for a '*' */
  4566. if (len == 0)
  4567. --p;
  4568. *p++ = '*';
  4569. }
  4570. /* add final nul - space already allocated above */
  4571. *p = 0;
  4572. return buf;
  4573. }
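/*
 * Example of the resulting format, with made-up values: a flags word with
 * two bits known to the table plus one undocumented bit might format as
 *
 *	"CceCsrParityErr,CceCsrReadBadAddrErr,bits 0x80"
 *
 * and if the buffer runs out of room the output is cut short and ends
 * in '*'.
 */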
  4574. /* first 8 CCE error interrupt source names */
  4575. static const char * const cce_misc_names[] = {
  4576. "CceErrInt", /* 0 */
  4577. "RxeErrInt", /* 1 */
  4578. "MiscErrInt", /* 2 */
  4579. "Reserved3", /* 3 */
  4580. "PioErrInt", /* 4 */
  4581. "SDmaErrInt", /* 5 */
  4582. "EgressErrInt", /* 6 */
  4583. "TxeErrInt" /* 7 */
  4584. };
  4585. /*
  4586. * Return the miscellaneous error interrupt name.
  4587. */
  4588. static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
  4589. {
  4590. if (source < ARRAY_SIZE(cce_misc_names))
  4591. strncpy(buf, cce_misc_names[source], bsize);
  4592. else
  4593. snprintf(buf, bsize, "Reserved%u",
  4594. source + IS_GENERAL_ERR_START);
  4595. return buf;
  4596. }
  4597. /*
  4598. * Return the SDMA engine error interrupt name.
  4599. */
  4600. static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
  4601. {
  4602. snprintf(buf, bsize, "SDmaEngErrInt%u", source);
  4603. return buf;
  4604. }
  4605. /*
  4606. * Return the send context error interrupt name.
  4607. */
  4608. static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
  4609. {
  4610. snprintf(buf, bsize, "SendCtxtErrInt%u", source);
  4611. return buf;
  4612. }
  4613. static const char * const various_names[] = {
  4614. "PbcInt",
  4615. "GpioAssertInt",
  4616. "Qsfp1Int",
  4617. "Qsfp2Int",
  4618. "TCritInt"
  4619. };
  4620. /*
  4621. * Return the various interrupt name.
  4622. */
  4623. static char *is_various_name(char *buf, size_t bsize, unsigned int source)
  4624. {
  4625. if (source < ARRAY_SIZE(various_names))
  4626. strncpy(buf, various_names[source], bsize);
  4627. else
  4628. snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
  4629. return buf;
  4630. }
  4631. /*
  4632. * Return the DC interrupt name.
  4633. */
  4634. static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
  4635. {
  4636. static const char * const dc_int_names[] = {
  4637. "common",
  4638. "lcb",
  4639. "8051",
  4640. "lbm" /* local block merge */
  4641. };
  4642. if (source < ARRAY_SIZE(dc_int_names))
  4643. snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
  4644. else
  4645. snprintf(buf, bsize, "DCInt%u", source);
  4646. return buf;
  4647. }
  4648. static const char * const sdma_int_names[] = {
  4649. "SDmaInt",
  4650. "SdmaIdleInt",
  4651. "SdmaProgressInt",
  4652. };
  4653. /*
  4654. * Return the SDMA engine interrupt name.
  4655. */
  4656. static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
  4657. {
  4658. /* what interrupt */
  4659. unsigned int what = source / TXE_NUM_SDMA_ENGINES;
  4660. /* which engine */
  4661. unsigned int which = source % TXE_NUM_SDMA_ENGINES;
  4662. if (likely(what < 3))
  4663. snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
  4664. else
  4665. snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
  4666. return buf;
  4667. }
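/*
 * Illustrative decode, assuming TXE_NUM_SDMA_ENGINES is 16: source 0
 * yields "SDmaInt0" (what = 0, which = 0) and source 17 yields
 * "SdmaIdleInt1" (what = 1, which = 1).
 */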
  4668. /*
  4669. * Return the receive available interrupt name.
  4670. */
  4671. static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
  4672. {
  4673. snprintf(buf, bsize, "RcvAvailInt%u", source);
  4674. return buf;
  4675. }
  4676. /*
  4677. * Return the receive urgent interrupt name.
  4678. */
  4679. static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
  4680. {
  4681. snprintf(buf, bsize, "RcvUrgentInt%u", source);
  4682. return buf;
  4683. }
  4684. /*
  4685. * Return the send credit interrupt name.
  4686. */
  4687. static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
  4688. {
  4689. snprintf(buf, bsize, "SendCreditInt%u", source);
  4690. return buf;
  4691. }
  4692. /*
  4693. * Return the reserved interrupt name.
  4694. */
  4695. static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
  4696. {
  4697. snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
  4698. return buf;
  4699. }
  4700. static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
  4701. {
  4702. return flag_string(buf, buf_len, flags,
  4703. cce_err_status_flags,
  4704. ARRAY_SIZE(cce_err_status_flags));
  4705. }
  4706. static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
  4707. {
  4708. return flag_string(buf, buf_len, flags,
  4709. rxe_err_status_flags,
  4710. ARRAY_SIZE(rxe_err_status_flags));
  4711. }
  4712. static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
  4713. {
  4714. return flag_string(buf, buf_len, flags, misc_err_status_flags,
  4715. ARRAY_SIZE(misc_err_status_flags));
  4716. }
  4717. static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
  4718. {
  4719. return flag_string(buf, buf_len, flags,
  4720. pio_err_status_flags,
  4721. ARRAY_SIZE(pio_err_status_flags));
  4722. }
  4723. static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
  4724. {
  4725. return flag_string(buf, buf_len, flags,
  4726. sdma_err_status_flags,
  4727. ARRAY_SIZE(sdma_err_status_flags));
  4728. }
  4729. static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
  4730. {
  4731. return flag_string(buf, buf_len, flags,
  4732. egress_err_status_flags,
  4733. ARRAY_SIZE(egress_err_status_flags));
  4734. }
  4735. static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
  4736. {
  4737. return flag_string(buf, buf_len, flags,
  4738. egress_err_info_flags,
  4739. ARRAY_SIZE(egress_err_info_flags));
  4740. }
  4741. static char *send_err_status_string(char *buf, int buf_len, u64 flags)
  4742. {
  4743. return flag_string(buf, buf_len, flags,
  4744. send_err_status_flags,
  4745. ARRAY_SIZE(send_err_status_flags));
  4746. }
  4747. static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4748. {
  4749. char buf[96];
  4750. int i = 0;
/*
 * For most of these errors, there is nothing that can be done except
 * report or record it.
 */
  4755. dd_dev_info(dd, "CCE Error: %s\n",
  4756. cce_err_status_string(buf, sizeof(buf), reg));
  4757. if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
  4758. is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
/* this error requires a manual drop into SPC freeze mode and then a fix up */
  4761. start_freeze_handling(dd->pport, FREEZE_SELF);
  4762. }
  4763. for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
  4764. if (reg & (1ull << i)) {
  4765. incr_cntr64(&dd->cce_err_status_cnt[i]);
  4766. /* maintain a counter over all cce_err_status errors */
  4767. incr_cntr64(&dd->sw_cce_err_status_aggregate);
  4768. }
  4769. }
  4770. }
  4771. /*
  4772. * Check counters for receive errors that do not have an interrupt
  4773. * associated with them.
  4774. */
  4775. #define RCVERR_CHECK_TIME 10
  4776. static void update_rcverr_timer(unsigned long opaque)
  4777. {
  4778. struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
  4779. struct hfi1_pportdata *ppd = dd->pport;
  4780. u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
  4781. if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
  4782. ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
  4783. dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
  4784. set_link_down_reason(
  4785. ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
  4786. OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
  4787. queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
  4788. }
  4789. dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
  4790. mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
  4791. }
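/*
 * In other words: every RCVERR_CHECK_TIME seconds the timer compares the
 * current RcvOvf hardware count against the last snapshot; if it grew and
 * the port error action requests a bounce on excessive buffer overrun,
 * the link bounce work is queued.  The snapshot is then updated and the
 * timer re-arms itself.
 */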
  4792. static int init_rcverr(struct hfi1_devdata *dd)
  4793. {
  4794. setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
  4795. /* Assume the hardware counter has been reset */
  4796. dd->rcv_ovfl_cnt = 0;
  4797. return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
  4798. }
  4799. static void free_rcverr(struct hfi1_devdata *dd)
  4800. {
  4801. if (dd->rcverr_timer.data)
  4802. del_timer_sync(&dd->rcverr_timer);
  4803. dd->rcverr_timer.data = 0;
  4804. }
  4805. static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4806. {
  4807. char buf[96];
  4808. int i = 0;
  4809. dd_dev_info(dd, "Receive Error: %s\n",
  4810. rxe_err_status_string(buf, sizeof(buf), reg));
  4811. if (reg & ALL_RXE_FREEZE_ERR) {
  4812. int flags = 0;
  4813. /*
  4814. * Freeze mode recovery is disabled for the errors
  4815. * in RXE_FREEZE_ABORT_MASK
  4816. */
  4817. if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
  4818. flags = FREEZE_ABORT;
  4819. start_freeze_handling(dd->pport, flags);
  4820. }
  4821. for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
  4822. if (reg & (1ull << i))
  4823. incr_cntr64(&dd->rcv_err_status_cnt[i]);
  4824. }
  4825. }
  4826. static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4827. {
  4828. char buf[96];
  4829. int i = 0;
dd_dev_info(dd, "Misc Error: %s\n",
	    misc_err_status_string(buf, sizeof(buf), reg));
  4832. for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
  4833. if (reg & (1ull << i))
  4834. incr_cntr64(&dd->misc_err_status_cnt[i]);
  4835. }
  4836. }
  4837. static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4838. {
  4839. char buf[96];
  4840. int i = 0;
  4841. dd_dev_info(dd, "PIO Error: %s\n",
  4842. pio_err_status_string(buf, sizeof(buf), reg));
  4843. if (reg & ALL_PIO_FREEZE_ERR)
  4844. start_freeze_handling(dd->pport, 0);
  4845. for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
  4846. if (reg & (1ull << i))
  4847. incr_cntr64(&dd->send_pio_err_status_cnt[i]);
  4848. }
  4849. }
  4850. static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4851. {
  4852. char buf[96];
  4853. int i = 0;
  4854. dd_dev_info(dd, "SDMA Error: %s\n",
  4855. sdma_err_status_string(buf, sizeof(buf), reg));
  4856. if (reg & ALL_SDMA_FREEZE_ERR)
  4857. start_freeze_handling(dd->pport, 0);
  4858. for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
  4859. if (reg & (1ull << i))
  4860. incr_cntr64(&dd->send_dma_err_status_cnt[i]);
  4861. }
  4862. }
  4863. static inline void __count_port_discards(struct hfi1_pportdata *ppd)
  4864. {
  4865. incr_cntr64(&ppd->port_xmit_discards);
  4866. }
  4867. static void count_port_inactive(struct hfi1_devdata *dd)
  4868. {
  4869. __count_port_discards(dd->pport);
  4870. }
/*
 * We have had a "disallowed packet" error during egress.  Determine the
 * integrity check which failed, and update the relevant error counter,
 * etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single bit of
 * state per integrity check, so if more than one packet fails the same
 * integrity check before the corresponding bit is read and cleared, the
 * reason for the earlier egress errors is lost.
 */
  4880. static void handle_send_egress_err_info(struct hfi1_devdata *dd,
  4881. int vl)
  4882. {
  4883. struct hfi1_pportdata *ppd = dd->pport;
  4884. u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
  4885. u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
  4886. char buf[96];
  4887. /* clear down all observed info as quickly as possible after read */
  4888. write_csr(dd, SEND_EGRESS_ERR_INFO, info);
  4889. dd_dev_info(dd,
  4890. "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
  4891. info, egress_err_info_string(buf, sizeof(buf), info), src);
  4892. /* Eventually add other counters for each bit */
  4893. if (info & PORT_DISCARD_EGRESS_ERRS) {
  4894. int weight, i;
  4895. /*
  4896. * Count all applicable bits as individual errors and
  4897. * attribute them to the packet that triggered this handler.
  4898. * This may not be completely accurate due to limitations
  4899. * on the available hardware error information. There is
  4900. * a single information register and any number of error
  4901. * packets may have occurred and contributed to it before
  4902. * this routine is called. This means that:
  4903. * a) If multiple packets with the same error occur before
  4904. * this routine is called, earlier packets are missed.
  4905. * There is only a single bit for each error type.
  4906. * b) Errors may not be attributed to the correct VL.
  4907. * The driver is attributing all bits in the info register
  4908. * to the packet that triggered this call, but bits
  4909. * could be an accumulation of different packets with
  4910. * different VLs.
  4911. * c) A single error packet may have multiple counts attached
  4912. * to it. There is no way for the driver to know if
  4913. * multiple bits set in the info register are due to a
  4914. * single packet or multiple packets. The driver assumes
  4915. * multiple packets.
  4916. */
  4917. weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
  4918. for (i = 0; i < weight; i++) {
  4919. __count_port_discards(ppd);
  4920. if (vl >= 0 && vl < TXE_NUM_DATA_VL)
  4921. incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
  4922. else if (vl == 15)
  4923. incr_cntr64(&ppd->port_xmit_discards_vl
  4924. [C_VL_15]);
  4925. }
  4926. }
  4927. }
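/*
 * Worked example, with made-up numbers: if three of the
 * PORT_DISCARD_EGRESS_ERRS bits were set in the info register and the
 * triggering packet mapped to VL 2, port_xmit_discards is bumped three
 * times and port_xmit_discards_vl[2] three times as well; a VL of 15
 * uses the C_VL_15 bucket instead, and any other VL only updates the
 * per-port total.
 */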
  4928. /*
  4929. * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
  4930. * register. Does it represent a 'port inactive' error?
  4931. */
  4932. static inline int port_inactive_err(u64 posn)
  4933. {
  4934. return (posn >= SEES(TX_LINKDOWN) &&
  4935. posn <= SEES(TX_INCORRECT_LINK_STATE));
  4936. }
  4937. /*
  4938. * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
  4939. * register. Does it represent a 'disallowed packet' error?
  4940. */
  4941. static inline int disallowed_pkt_err(int posn)
  4942. {
  4943. return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
  4944. posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
  4945. }
  4946. /*
  4947. * Input value is a bit position of one of the SDMA engine disallowed
  4948. * packet errors. Return which engine. Use of this must be guarded by
  4949. * disallowed_pkt_err().
  4950. */
  4951. static inline int disallowed_pkt_engine(int posn)
  4952. {
  4953. return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
  4954. }
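/*
 * For example, the bit position for TX_SDMA3_DISALLOWED_PACKET maps to
 * engine 3; engine_to_vl() below then turns that engine number into the
 * VL currently bound to it, or -1 if the translation cannot be done.
 */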
/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
  4959. static int engine_to_vl(struct hfi1_devdata *dd, int engine)
  4960. {
  4961. struct sdma_vl_map *m;
  4962. int vl;
  4963. /* range check */
  4964. if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
  4965. return -1;
  4966. rcu_read_lock();
  4967. m = rcu_dereference(dd->sdma_map);
  4968. vl = m->engine_to_vl[engine];
  4969. rcu_read_unlock();
  4970. return vl;
  4971. }
/*
 * Translate the send context (software index) into a VL.  Return -1 if
 * the translation cannot be done.
 */
  4976. static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
  4977. {
  4978. struct send_context_info *sci;
  4979. struct send_context *sc;
  4980. int i;
  4981. sci = &dd->send_contexts[sw_index];
  4982. /* there is no information for user (PSM) and ack contexts */
  4983. if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
  4984. return -1;
  4985. sc = sci->sc;
  4986. if (!sc)
  4987. return -1;
  4988. if (dd->vld[15].sc == sc)
  4989. return 15;
  4990. for (i = 0; i < num_vls; i++)
  4991. if (dd->vld[i].sc == sc)
  4992. return i;
  4993. return -1;
  4994. }
  4995. static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  4996. {
  4997. u64 reg_copy = reg, handled = 0;
  4998. char buf[96];
  4999. int i = 0;
  5000. if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
  5001. start_freeze_handling(dd->pport, 0);
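/*
 * A-step silicon: a TX credit return VL error also starts freeze
 * handling, except when running on the functional simulator.
 */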
  5002. else if (is_ax(dd) &&
  5003. (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
  5004. (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
  5005. start_freeze_handling(dd->pport, 0);
  5006. while (reg_copy) {
  5007. int posn = fls64(reg_copy);
  5008. /* fls64() returns a 1-based offset, we want it zero based */
  5009. int shift = posn - 1;
  5010. u64 mask = 1ULL << shift;
  5011. if (port_inactive_err(shift)) {
  5012. count_port_inactive(dd);
  5013. handled |= mask;
  5014. } else if (disallowed_pkt_err(shift)) {
  5015. int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
  5016. handle_send_egress_err_info(dd, vl);
  5017. handled |= mask;
  5018. }
  5019. reg_copy &= ~mask;
  5020. }
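/* report and count only the error bits that were not handled above */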
  5021. reg &= ~handled;
  5022. if (reg)
  5023. dd_dev_info(dd, "Egress Error: %s\n",
  5024. egress_err_status_string(buf, sizeof(buf), reg));
  5025. for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
  5026. if (reg & (1ull << i))
  5027. incr_cntr64(&dd->send_egress_err_status_cnt[i]);
  5028. }
  5029. }
  5030. static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  5031. {
  5032. char buf[96];
  5033. int i = 0;
  5034. dd_dev_info(dd, "Send Error: %s\n",
  5035. send_err_status_string(buf, sizeof(buf), reg));
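/* update the per-bit software error counters */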
  5036. for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
  5037. if (reg & (1ull << i))
  5038. incr_cntr64(&dd->send_err_status_cnt[i]);
  5039. }
  5040. }
  5041. /*
  5042. * The maximum number of times the error clear down will loop before
  5043. * blocking a repeating error. This value is arbitrary.
  5044. */
  5045. #define MAX_CLEAR_COUNT 20
  5046. /*
  5047. * Clear and handle an error register. All error interrupts are funneled
  5048. * through here to have a central location to correctly handle single-
  5049. * or multi-shot errors.
  5050. *
  5051. * For non per-context registers, call this routine with a context value
  5052. * of 0 so the per-context offset is zero.
  5053. *
  5054. * If the handler loops too many times, assume that something is wrong
  5055. * and can't be fixed, so mask the error bits.
  5056. */
  5057. static void interrupt_clear_down(struct hfi1_devdata *dd,
  5058. u32 context,
  5059. const struct err_reg_info *eri)
  5060. {
  5061. u64 reg;
  5062. u32 count;
  5063. /* read in a loop until no more errors are seen */
  5064. count = 0;
  5065. while (1) {
  5066. reg = read_kctxt_csr(dd, context, eri->status);
  5067. if (reg == 0)
  5068. break;
  5069. write_kctxt_csr(dd, context, eri->clear, reg);
  5070. if (likely(eri->handler))
  5071. eri->handler(dd, context, reg);
  5072. count++;
  5073. if (count > MAX_CLEAR_COUNT) {
  5074. u64 mask;
  5075. dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
  5076. eri->desc, reg);
  5077. /*
  5078. * Read-modify-write so any other masked bits
  5079. * remain masked.
  5080. */
  5081. mask = read_kctxt_csr(dd, context, eri->mask);
  5082. mask &= ~reg;
  5083. write_kctxt_csr(dd, context, eri->mask, mask);
  5084. break;
  5085. }
  5086. }
  5087. }
  5088. /*
  5089. * CCE block "misc" interrupt. Source is < 16.
  5090. */
  5091. static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
  5092. {
  5093. const struct err_reg_info *eri = &misc_errs[source];
  5094. if (eri->handler) {
  5095. interrupt_clear_down(dd, 0, eri);
  5096. } else {
  5097. dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
  5098. source);
  5099. }
  5100. }
  5101. static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
  5102. {
  5103. return flag_string(buf, buf_len, flags,
  5104. sc_err_status_flags,
  5105. ARRAY_SIZE(sc_err_status_flags));
  5106. }
  5107. /*
  5108. * Send context error interrupt. Source (hw_context) is < 160.
  5109. *
  5110. * All send context errors cause the send context to halt. The normal
  5111. * clear-down mechanism cannot be used because we cannot clear the
  5112. * error bits until several other long-running items are done first.
  5113. * This is OK because with the context halted, nothing else is going
  5114. * to happen on it anyway.
  5115. */
  5116. static void is_sendctxt_err_int(struct hfi1_devdata *dd,
  5117. unsigned int hw_context)
  5118. {
  5119. struct send_context_info *sci;
  5120. struct send_context *sc;
  5121. char flags[96];
  5122. u64 status;
  5123. u32 sw_index;
  5124. int i = 0;
  5125. sw_index = dd->hw_to_sw[hw_context];
  5126. if (sw_index >= dd->num_send_contexts) {
  5127. dd_dev_err(dd,
  5128. "out of range sw index %u for send context %u\n",
  5129. sw_index, hw_context);
  5130. return;
  5131. }
  5132. sci = &dd->send_contexts[sw_index];
  5133. sc = sci->sc;
  5134. if (!sc) {
  5135. dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
  5136. sw_index, hw_context);
  5137. return;
  5138. }
  5139. /* tell the software that a halt has begun */
  5140. sc_stop(sc, SCF_HALTED);
  5141. status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
  5142. dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
  5143. send_context_err_status_string(flags, sizeof(flags),
  5144. status));
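/* attribute any resulting port discards to this context's VL */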
  5145. if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
  5146. handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
  5147. /*
  5148. * Automatically restart halted kernel contexts out of interrupt
  5149. * context. User contexts must ask the driver to restart the context.
  5150. */
  5151. if (sc->type != SC_USER)
  5152. queue_work(dd->pport->hfi1_wq, &sc->halt_work);
  5153. /*
  5154. * Update the counters for the corresponding status bits.
  5155. * Note that these particular counters are aggregated over all
  5156. * 160 contexts.
  5157. */
  5158. for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
  5159. if (status & (1ull << i))
  5160. incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
  5161. }
  5162. }
  5163. static void handle_sdma_eng_err(struct hfi1_devdata *dd,
  5164. unsigned int source, u64 status)
  5165. {
  5166. struct sdma_engine *sde;
  5167. int i = 0;
  5168. sde = &dd->per_sdma[source];
  5169. #ifdef CONFIG_SDMA_VERBOSITY
  5170. dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  5171. slashstrip(__FILE__), __LINE__, __func__);
  5172. dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
  5173. sde->this_idx, source, (unsigned long long)status);
  5174. #endif
  5175. sde->err_cnt++;
  5176. sdma_engine_error(sde, status);
  5177. /*
  5178. * Update the counters for the corresponding status bits.
  5179. * Note that these particular counters are aggregated over
  5180. * all 16 DMA engines.
  5181. */
  5182. for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
  5183. if (status & (1ull << i))
  5184. incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
  5185. }
  5186. }
  5187. /*
  5188. * CCE block SDMA error interrupt. Source is < 16.
  5189. */
  5190. static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
  5191. {
  5192. #ifdef CONFIG_SDMA_VERBOSITY
  5193. struct sdma_engine *sde = &dd->per_sdma[source];
  5194. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  5195. slashstrip(__FILE__), __LINE__, __func__);
  5196. dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
  5197. source);
  5198. sdma_dumpstate(sde);
  5199. #endif
  5200. interrupt_clear_down(dd, source, &sdma_eng_err);
  5201. }
  5202. /*
  5203. * CCE block "various" interrupt. Source is < 8.
  5204. */
  5205. static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
  5206. {
  5207. const struct err_reg_info *eri = &various_err[source];
  5208. /*
  5209. * TCritInt cannot go through interrupt_clear_down()
  5210. * because it is not a second tier interrupt. The handler
  5211. * should be called directly.
  5212. */
  5213. if (source == TCRIT_INT_SOURCE)
  5214. handle_temp_err(dd);
  5215. else if (eri->handler)
  5216. interrupt_clear_down(dd, 0, eri);
  5217. else
  5218. dd_dev_info(dd,
  5219. "%s: Unimplemented/reserved interrupt %d\n",
  5220. __func__, source);
  5221. }
  5222. static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
  5223. {
  5224. /* src_ctx is always zero */
  5225. struct hfi1_pportdata *ppd = dd->pport;
  5226. unsigned long flags;
  5227. u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
  5228. if (reg & QSFP_HFI0_MODPRST_N) {
  5229. if (!qsfp_mod_present(ppd)) {
  5230. dd_dev_info(dd, "%s: QSFP module removed\n",
  5231. __func__);
  5232. ppd->driver_link_ready = 0;
  5233. /*
  5234. * Cable removed, reset all our information about the
  5235. * cache and cable capabilities
  5236. */
  5237. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5238. /*
  5239. * We don't set cache_refresh_required here as we expect
  5240. * an interrupt when a cable is inserted
  5241. */
  5242. ppd->qsfp_info.cache_valid = 0;
  5243. ppd->qsfp_info.reset_needed = 0;
  5244. ppd->qsfp_info.limiting_active = 0;
  5245. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  5246. flags);
  5247. /* Invert the ModPresent pin now to detect plug-in */
  5248. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
  5249. ASIC_QSFP1_INVERT, qsfp_int_mgmt);
  5250. if ((ppd->offline_disabled_reason >
  5251. HFI1_ODR_MASK(
  5252. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
  5253. (ppd->offline_disabled_reason ==
  5254. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
  5255. ppd->offline_disabled_reason =
  5256. HFI1_ODR_MASK(
  5257. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
  5258. if (ppd->host_link_state == HLS_DN_POLL) {
  5259. /*
  5260. * The link is still in POLL. This means
  5261. * that the normal link down processing
  5262. * will not happen. We have to do it here
  5263. * before turning the DC off.
  5264. */
  5265. queue_work(ppd->hfi1_wq, &ppd->link_down_work);
  5266. }
  5267. } else {
  5268. dd_dev_info(dd, "%s: QSFP module inserted\n",
  5269. __func__);
  5270. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5271. ppd->qsfp_info.cache_valid = 0;
  5272. ppd->qsfp_info.cache_refresh_required = 1;
  5273. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  5274. flags);
  5275. /*
  5276. * Stop inversion of ModPresent pin to detect
  5277. * removal of the cable
  5278. */
  5279. qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
  5280. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
  5281. ASIC_QSFP1_INVERT, qsfp_int_mgmt);
  5282. ppd->offline_disabled_reason =
  5283. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
  5284. }
  5285. }
  5286. if (reg & QSFP_HFI0_INT_N) {
  5287. dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
  5288. __func__);
  5289. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5290. ppd->qsfp_info.check_interrupt_flags = 1;
  5291. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
  5292. }
  5293. /* Schedule the QSFP work only if there is a cable attached. */
  5294. if (qsfp_mod_present(ppd))
  5295. queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
  5296. }
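/* Ask the 8051 to give the host access to the LCB CSRs. */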
  5297. static int request_host_lcb_access(struct hfi1_devdata *dd)
  5298. {
  5299. int ret;
  5300. ret = do_8051_command(dd, HCMD_MISC,
  5301. (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
  5302. LOAD_DATA_FIELD_ID_SHIFT, NULL);
  5303. if (ret != HCMD_SUCCESS) {
  5304. dd_dev_err(dd, "%s: command failed with error %d\n",
  5305. __func__, ret);
  5306. }
  5307. return ret == HCMD_SUCCESS ? 0 : -EBUSY;
  5308. }
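/* Tell the 8051 to take LCB access back (grant it to the 8051). */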
  5309. static int request_8051_lcb_access(struct hfi1_devdata *dd)
  5310. {
  5311. int ret;
  5312. ret = do_8051_command(dd, HCMD_MISC,
  5313. (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
  5314. LOAD_DATA_FIELD_ID_SHIFT, NULL);
  5315. if (ret != HCMD_SUCCESS) {
  5316. dd_dev_err(dd, "%s: command failed with error %d\n",
  5317. __func__, ret);
  5318. }
  5319. return ret == HCMD_SUCCESS ? 0 : -EBUSY;
  5320. }
  5321. /*
  5322. * Set the LCB selector - allow host access. The DCC selector always
  5323. * points to the host.
  5324. */
  5325. static inline void set_host_lcb_access(struct hfi1_devdata *dd)
  5326. {
  5327. write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
  5328. DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
  5329. DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
  5330. }
  5331. /*
  5332. * Clear the LCB selector - allow 8051 access. The DCC selector always
  5333. * points to the host.
  5334. */
  5335. static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
  5336. {
  5337. write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
  5338. DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
  5339. }
  5340. /*
  5341. * Acquire LCB access from the 8051. If the host already has access,
  5342. * just increment a counter. Otherwise, inform the 8051 that the
  5343. * host is taking access.
  5344. *
  5345. * Returns:
  5346. * 0 on success
  5347. * -EBUSY if the 8051 has control and cannot be disturbed
  5348. * -errno if unable to acquire access from the 8051
  5349. */
  5350. int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
  5351. {
  5352. struct hfi1_pportdata *ppd = dd->pport;
  5353. int ret = 0;
  5354. /*
  5355. * Use the host link state lock so the operation of this routine
  5356. * { link state check, selector change, count increment } can occur
  5357. * as a unit against a link state change. Otherwise there is a
  5358. * race between the state change and the count increment.
  5359. */
  5360. if (sleep_ok) {
  5361. mutex_lock(&ppd->hls_lock);
  5362. } else {
  5363. while (!mutex_trylock(&ppd->hls_lock))
  5364. udelay(1);
  5365. }
  5366. /* this access is valid only when the link is up */
  5367. if (ppd->host_link_state & HLS_DOWN) {
  5368. dd_dev_info(dd, "%s: link state %s not up\n",
  5369. __func__, link_state_name(ppd->host_link_state));
  5370. ret = -EBUSY;
  5371. goto done;
  5372. }
  5373. if (dd->lcb_access_count == 0) {
  5374. ret = request_host_lcb_access(dd);
  5375. if (ret) {
  5376. dd_dev_err(dd,
  5377. "%s: unable to acquire LCB access, err %d\n",
  5378. __func__, ret);
  5379. goto done;
  5380. }
  5381. set_host_lcb_access(dd);
  5382. }
  5383. dd->lcb_access_count++;
  5384. done:
  5385. mutex_unlock(&ppd->hls_lock);
  5386. return ret;
  5387. }
  5388. /*
  5389. * Release LCB access by decrementing the use count. If the count is moving
  5390. * from 1 to 0, inform 8051 that it has control back.
  5391. *
  5392. * Returns:
  5393. * 0 on success
  5394. * -errno if unable to release access to the 8051
  5395. */
  5396. int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
  5397. {
  5398. int ret = 0;
  5399. /*
  5400. * Use the host link state lock because the acquire needed it.
  5401. * Here, we only need to keep { selector change, count decrement }
  5402. * as a unit.
  5403. */
  5404. if (sleep_ok) {
  5405. mutex_lock(&dd->pport->hls_lock);
  5406. } else {
  5407. while (!mutex_trylock(&dd->pport->hls_lock))
  5408. udelay(1);
  5409. }
  5410. if (dd->lcb_access_count == 0) {
  5411. dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
  5412. __func__);
  5413. goto done;
  5414. }
  5415. if (dd->lcb_access_count == 1) {
  5416. set_8051_lcb_access(dd);
  5417. ret = request_8051_lcb_access(dd);
  5418. if (ret) {
  5419. dd_dev_err(dd,
  5420. "%s: unable to release LCB access, err %d\n",
  5421. __func__, ret);
  5422. /* restore host access if the grant didn't work */
  5423. set_host_lcb_access(dd);
  5424. goto done;
  5425. }
  5426. }
  5427. dd->lcb_access_count--;
  5428. done:
  5429. mutex_unlock(&dd->pport->hls_lock);
  5430. return ret;
  5431. }
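/*
 * Minimal usage sketch (illustrative only): callers bracket direct
 * LCB CSR accesses with the acquire/release pair, e.g.
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		... read/write DC_LCB_* CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 */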
  5432. /*
  5433. * Initialize LCB access variables and state. Called during driver load,
  5434. * after most of the initialization is finished.
  5435. *
  5436. * The DC default is LCB access on for the host. The driver defaults to
  5437. * leaving access to the 8051. Assign access now - this constrains the call
  5438. * to this routine to be after all LCB set-up is done. In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
  5440. */
  5441. static void init_lcb_access(struct hfi1_devdata *dd)
  5442. {
  5443. dd->lcb_access_count = 0;
  5444. }
  5445. /*
  5446. * Write a response back to a 8051 request.
  5447. */
  5448. static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
  5449. {
  5450. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
  5451. DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
  5452. (u64)return_code <<
  5453. DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
  5454. (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
  5455. }
  5456. /*
  5457. * Handle host requests from the 8051.
  5458. */
  5459. static void handle_8051_request(struct hfi1_pportdata *ppd)
  5460. {
  5461. struct hfi1_devdata *dd = ppd->dd;
  5462. u64 reg;
  5463. u16 data = 0;
  5464. u8 type;
  5465. reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
  5466. if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
  5467. return; /* no request */
  5468. /* zero out COMPLETED so the response is seen */
  5469. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
  5470. /* extract request details */
  5471. type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
  5472. & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
  5473. data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
  5474. & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
  5475. switch (type) {
  5476. case HREQ_LOAD_CONFIG:
  5477. case HREQ_SAVE_CONFIG:
  5478. case HREQ_READ_CONFIG:
  5479. case HREQ_SET_TX_EQ_ABS:
  5480. case HREQ_SET_TX_EQ_REL:
  5481. case HREQ_ENABLE:
  5482. dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
  5483. type);
  5484. hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
  5485. break;
  5486. case HREQ_CONFIG_DONE:
  5487. hreq_response(dd, HREQ_SUCCESS, 0);
  5488. break;
  5489. case HREQ_INTERFACE_TEST:
  5490. hreq_response(dd, HREQ_SUCCESS, data);
  5491. break;
  5492. default:
  5493. dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
  5494. hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
  5495. break;
  5496. }
  5497. }
/*
 * Set up the allocation unit value.
 */
  5501. void set_up_vau(struct hfi1_devdata *dd, u8 vau)
  5502. {
  5503. u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  5504. /* do not modify other values in the register */
  5505. reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
  5506. reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
  5507. write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
  5508. }
  5509. /*
  5510. * Set up initial VL15 credits of the remote. Assumes the rest of
  5511. * the CM credit registers are zero from a previous global or credit reset.
  5512. * Shared limit for VL15 will always be 0.
  5513. */
  5514. void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
  5515. {
  5516. u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  5517. /* set initial values for total and shared credit limit */
  5518. reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
  5519. SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
  5520. /*
  5521. * Set total limit to be equal to VL15 credits.
  5522. * Leave shared limit at 0.
  5523. */
  5524. reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
  5525. write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
  5526. write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
  5527. << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
  5528. }
  5529. /*
  5530. * Zero all credit details from the previous connection and
  5531. * reset the CM manager's internal counters.
  5532. */
  5533. void reset_link_credits(struct hfi1_devdata *dd)
  5534. {
  5535. int i;
  5536. /* remove all previous VL credit limits */
  5537. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  5538. write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
  5539. write_csr(dd, SEND_CM_CREDIT_VL15, 0);
  5540. write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
  5541. /* reset the CM block */
  5542. pio_send_control(dd, PSC_CM_RESET);
  5543. /* reset cached value */
  5544. dd->vl15buf_cached = 0;
  5545. }
  5546. /* convert a vCU to a CU */
  5547. static u32 vcu_to_cu(u8 vcu)
  5548. {
  5549. return 1 << vcu;
  5550. }
  5551. /* convert a CU to a vCU */
  5552. static u8 cu_to_vcu(u32 cu)
  5553. {
  5554. return ilog2(cu);
  5555. }
  5556. /* convert a vAU to an AU */
  5557. static u32 vau_to_au(u8 vau)
  5558. {
  5559. return 8 * (1 << vau);
  5560. }
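/*
 * Example encodings: vAU = 1 -> AU = 16 bytes, vAU = 3 -> AU = 64 bytes;
 * vCU = 0 -> CU = 1 credit, vCU = 1 -> CU = 2 credits.
 */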
  5561. static void set_linkup_defaults(struct hfi1_pportdata *ppd)
  5562. {
  5563. ppd->sm_trap_qp = 0x0;
  5564. ppd->sa_qp = 0x1;
  5565. }
  5566. /*
  5567. * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
  5568. */
  5569. static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
  5570. {
  5571. u64 reg;
  5572. /* clear lcb run: LCB_CFG_RUN.EN = 0 */
  5573. write_csr(dd, DC_LCB_CFG_RUN, 0);
  5574. /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
  5575. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
  5576. 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
  5577. /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
  5578. dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
  5579. reg = read_csr(dd, DCC_CFG_RESET);
  5580. write_csr(dd, DCC_CFG_RESET, reg |
  5581. (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
  5582. (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
  5583. (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
  5584. if (!abort) {
  5585. udelay(1); /* must hold for the longer of 16cclks or 20ns */
  5586. write_csr(dd, DCC_CFG_RESET, reg);
  5587. write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
  5588. }
  5589. }
  5590. /*
  5591. * This routine should be called after the link has been transitioned to
  5592. * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
  5593. * reset).
  5594. *
  5595. * The expectation is that the caller of this routine would have taken
  5596. * care of properly transitioning the link into the correct state.
  5597. * NOTE: the caller needs to acquire the dd->dc8051_lock lock
  5598. * before calling this function.
  5599. */
  5600. static void _dc_shutdown(struct hfi1_devdata *dd)
  5601. {
  5602. lockdep_assert_held(&dd->dc8051_lock);
  5603. if (dd->dc_shutdown)
  5604. return;
  5605. dd->dc_shutdown = 1;
  5606. /* Shutdown the LCB */
  5607. lcb_shutdown(dd, 1);
/*
 * Going to OFFLINE would have caused the 8051 to put the
 * SerDes into reset already. We just need to shut down the
 * 8051 itself.
 */
  5613. write_csr(dd, DC_DC8051_CFG_RST, 0x1);
  5614. }
  5615. static void dc_shutdown(struct hfi1_devdata *dd)
  5616. {
  5617. mutex_lock(&dd->dc8051_lock);
  5618. _dc_shutdown(dd);
  5619. mutex_unlock(&dd->dc8051_lock);
  5620. }
  5621. /*
  5622. * Calling this after the DC has been brought out of reset should not
  5623. * do any damage.
  5624. * NOTE: the caller needs to acquire the dd->dc8051_lock lock
  5625. * before calling this function.
  5626. */
  5627. static void _dc_start(struct hfi1_devdata *dd)
  5628. {
  5629. lockdep_assert_held(&dd->dc8051_lock);
  5630. if (!dd->dc_shutdown)
  5631. return;
  5632. /* Take the 8051 out of reset */
  5633. write_csr(dd, DC_DC8051_CFG_RST, 0ull);
  5634. /* Wait until 8051 is ready */
  5635. if (wait_fm_ready(dd, TIMEOUT_8051_START))
  5636. dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
  5637. __func__);
  5638. /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
  5639. write_csr(dd, DCC_CFG_RESET, 0x10);
  5640. /* lcb_shutdown() with abort=1 does not restore these */
  5641. write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
  5642. dd->dc_shutdown = 0;
  5643. }
  5644. static void dc_start(struct hfi1_devdata *dd)
  5645. {
  5646. mutex_lock(&dd->dc8051_lock);
  5647. _dc_start(dd);
  5648. mutex_unlock(&dd->dc8051_lock);
  5649. }
  5650. /*
  5651. * These LCB adjustments are for the Aurora SerDes core in the FPGA.
  5652. */
  5653. static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
  5654. {
  5655. u64 rx_radr, tx_radr;
  5656. u32 version;
  5657. if (dd->icode != ICODE_FPGA_EMULATION)
  5658. return;
  5659. /*
  5660. * These LCB defaults on emulator _s are good, nothing to do here:
  5661. * LCB_CFG_TX_FIFOS_RADR
  5662. * LCB_CFG_RX_FIFOS_RADR
  5663. * LCB_CFG_LN_DCLK
  5664. * LCB_CFG_IGNORE_LOST_RCLK
  5665. */
  5666. if (is_emulator_s(dd))
  5667. return;
  5668. /* else this is _p */
  5669. version = emulator_rev(dd);
  5670. if (!is_ax(dd))
  5671. version = 0x2d; /* all B0 use 0x2d or higher settings */
  5672. if (version <= 0x12) {
  5673. /* release 0x12 and below */
  5674. /*
  5675. * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
  5676. * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
  5677. * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
  5678. */
  5679. rx_radr =
  5680. 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5681. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5682. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5683. /*
  5684. * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
  5685. * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
  5686. */
  5687. tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5688. } else if (version <= 0x18) {
  5689. /* release 0x13 up to 0x18 */
  5690. /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
  5691. rx_radr =
  5692. 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5693. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5694. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5695. tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5696. } else if (version == 0x19) {
  5697. /* release 0x19 */
  5698. /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
  5699. rx_radr =
  5700. 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5701. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5702. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5703. tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5704. } else if (version == 0x1a) {
  5705. /* release 0x1a */
  5706. /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
  5707. rx_radr =
  5708. 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5709. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5710. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5711. tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5712. write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
  5713. } else {
  5714. /* release 0x1b and higher */
  5715. /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
  5716. rx_radr =
  5717. 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5718. | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5719. | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5720. tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5721. }
  5722. write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
  5723. /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
  5724. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
  5725. DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
  5726. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
  5727. }
  5728. /*
  5729. * Handle a SMA idle message
  5730. *
  5731. * This is a work-queue function outside of the interrupt.
  5732. */
  5733. void handle_sma_message(struct work_struct *work)
  5734. {
  5735. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5736. sma_message_work);
  5737. struct hfi1_devdata *dd = ppd->dd;
  5738. u64 msg;
  5739. int ret;
  5740. /*
  5741. * msg is bytes 1-4 of the 40-bit idle message - the command code
  5742. * is stripped off
  5743. */
  5744. ret = read_idle_sma(dd, &msg);
  5745. if (ret)
  5746. return;
  5747. dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
  5748. /*
  5749. * React to the SMA message. Byte[1] (0 for us) is the command.
  5750. */
  5751. switch (msg & 0xff) {
  5752. case SMA_IDLE_ARM:
  5753. /*
  5754. * See OPAv1 table 9-14 - HFI and External Switch Ports Key
  5755. * State Transitions
  5756. *
  5757. * Only expected in INIT or ARMED, discard otherwise.
  5758. */
  5759. if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
  5760. ppd->neighbor_normal = 1;
  5761. break;
  5762. case SMA_IDLE_ACTIVE:
  5763. /*
  5764. * See OPAv1 table 9-14 - HFI and External Switch Ports Key
  5765. * State Transitions
  5766. *
  5767. * Can activate the node. Discard otherwise.
  5768. */
  5769. if (ppd->host_link_state == HLS_UP_ARMED &&
  5770. ppd->is_active_optimize_enabled) {
  5771. ppd->neighbor_normal = 1;
  5772. ret = set_link_state(ppd, HLS_UP_ACTIVE);
  5773. if (ret)
  5774. dd_dev_err(
  5775. dd,
  5776. "%s: received Active SMA idle message, couldn't set link to Active\n",
  5777. __func__);
  5778. }
  5779. break;
  5780. default:
  5781. dd_dev_err(dd,
  5782. "%s: received unexpected SMA idle message 0x%llx\n",
  5783. __func__, msg);
  5784. break;
  5785. }
  5786. }
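/*
 * Read-modify-write RCV_CTRL under the rcvctrl lock: set the 'add' bits,
 * then clear the 'clear' bits.
 */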
  5787. static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
  5788. {
  5789. u64 rcvctrl;
  5790. unsigned long flags;
  5791. spin_lock_irqsave(&dd->rcvctrl_lock, flags);
  5792. rcvctrl = read_csr(dd, RCV_CTRL);
  5793. rcvctrl |= add;
  5794. rcvctrl &= ~clear;
  5795. write_csr(dd, RCV_CTRL, rcvctrl);
  5796. spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
  5797. }
  5798. static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
  5799. {
  5800. adjust_rcvctrl(dd, add, 0);
  5801. }
  5802. static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
  5803. {
  5804. adjust_rcvctrl(dd, 0, clear);
  5805. }
  5806. /*
  5807. * Called from all interrupt handlers to start handling an SPC freeze.
  5808. */
  5809. void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
  5810. {
  5811. struct hfi1_devdata *dd = ppd->dd;
  5812. struct send_context *sc;
  5813. int i;
  5814. if (flags & FREEZE_SELF)
  5815. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
  5816. /* enter frozen mode */
  5817. dd->flags |= HFI1_FROZEN;
  5818. /* notify all SDMA engines that they are going into a freeze */
  5819. sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
  5820. /* do halt pre-handling on all enabled send contexts */
  5821. for (i = 0; i < dd->num_send_contexts; i++) {
  5822. sc = dd->send_contexts[i].sc;
  5823. if (sc && (sc->flags & SCF_ENABLED))
  5824. sc_stop(sc, SCF_FROZEN | SCF_HALTED);
  5825. }
/* Send contexts are frozen. Notify user space */
  5827. hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
  5828. if (flags & FREEZE_ABORT) {
  5829. dd_dev_err(dd,
  5830. "Aborted freeze recovery. Please REBOOT system\n");
  5831. return;
  5832. }
  5833. /* queue non-interrupt handler */
  5834. queue_work(ppd->hfi1_wq, &ppd->freeze_work);
  5835. }
  5836. /*
  5837. * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
  5838. * depending on the "freeze" parameter.
  5839. *
  5840. * No need to return an error if it times out, our only option
  5841. * is to proceed anyway.
  5842. */
  5843. static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
  5844. {
  5845. unsigned long timeout;
  5846. u64 reg;
  5847. timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
  5848. while (1) {
  5849. reg = read_csr(dd, CCE_STATUS);
  5850. if (freeze) {
  5851. /* waiting until all indicators are set */
  5852. if ((reg & ALL_FROZE) == ALL_FROZE)
  5853. return; /* all done */
  5854. } else {
  5855. /* waiting until all indicators are clear */
  5856. if ((reg & ALL_FROZE) == 0)
  5857. return; /* all done */
  5858. }
  5859. if (time_after(jiffies, timeout)) {
  5860. dd_dev_err(dd,
  5861. "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
  5862. freeze ? "" : "un", reg & ALL_FROZE,
  5863. freeze ? ALL_FROZE : 0ull);
  5864. return;
  5865. }
  5866. usleep_range(80, 120);
  5867. }
  5868. }
  5869. /*
  5870. * Do all freeze handling for the RXE block.
  5871. */
  5872. static void rxe_freeze(struct hfi1_devdata *dd)
  5873. {
  5874. int i;
  5875. /* disable port */
  5876. clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  5877. /* disable all receive contexts */
  5878. for (i = 0; i < dd->num_rcv_contexts; i++)
  5879. hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
  5880. }
  5881. /*
  5882. * Unfreeze handling for the RXE block - kernel contexts only.
  5883. * This will also enable the port. User contexts will do unfreeze
  5884. * handling on a per-context basis as they call into the driver.
  5885. *
  5886. */
  5887. static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
  5888. {
  5889. u32 rcvmask;
  5890. int i;
  5891. /* enable all kernel contexts */
  5892. for (i = 0; i < dd->num_rcv_contexts; i++) {
  5893. struct hfi1_ctxtdata *rcd = dd->rcd[i];
  5894. /* Ensure all non-user contexts(including vnic) are enabled */
  5895. if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
  5896. continue;
  5897. rcvmask = HFI1_RCVCTRL_CTXT_ENB;
  5898. /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
  5899. rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
  5900. HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
  5901. hfi1_rcvctrl(dd, rcvmask, i);
  5902. }
  5903. /* enable port */
  5904. add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  5905. }
  5906. /*
  5907. * Non-interrupt SPC freeze handling.
  5908. *
  5909. * This is a work-queue function outside of the triggering interrupt.
  5910. */
  5911. void handle_freeze(struct work_struct *work)
  5912. {
  5913. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5914. freeze_work);
  5915. struct hfi1_devdata *dd = ppd->dd;
  5916. /* wait for freeze indicators on all affected blocks */
  5917. wait_for_freeze_status(dd, 1);
  5918. /* SPC is now frozen */
  5919. /* do send PIO freeze steps */
  5920. pio_freeze(dd);
  5921. /* do send DMA freeze steps */
  5922. sdma_freeze(dd);
  5923. /* do send egress freeze steps - nothing to do */
  5924. /* do receive freeze steps */
  5925. rxe_freeze(dd);
  5926. /*
  5927. * Unfreeze the hardware - clear the freeze, wait for each
  5928. * block's frozen bit to clear, then clear the frozen flag.
  5929. */
  5930. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
  5931. wait_for_freeze_status(dd, 0);
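/* A-step silicon: run one extra freeze/unfreeze cycle */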
  5932. if (is_ax(dd)) {
  5933. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
  5934. wait_for_freeze_status(dd, 1);
  5935. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
  5936. wait_for_freeze_status(dd, 0);
  5937. }
  5938. /* do send PIO unfreeze steps for kernel contexts */
  5939. pio_kernel_unfreeze(dd);
  5940. /* do send DMA unfreeze steps */
  5941. sdma_unfreeze(dd);
  5942. /* do send egress unfreeze steps - nothing to do */
  5943. /* do receive unfreeze steps for kernel contexts */
  5944. rxe_kernel_unfreeze(dd);
  5945. /*
  5946. * The unfreeze procedure touches global device registers when
  5947. * it disables and re-enables RXE. Mark the device unfrozen
  5948. * after all that is done so other parts of the driver waiting
  5949. * for the device to unfreeze don't do things out of order.
  5950. *
  5951. * The above implies that the meaning of HFI1_FROZEN flag is
  5952. * "Device has gone into freeze mode and freeze mode handling
  5953. * is still in progress."
  5954. *
  5955. * The flag will be removed when freeze mode processing has
  5956. * completed.
  5957. */
  5958. dd->flags &= ~HFI1_FROZEN;
  5959. wake_up(&dd->event_queue);
  5960. /* no longer frozen */
  5961. }
  5962. /*
  5963. * Handle a link up interrupt from the 8051.
  5964. *
  5965. * This is a work-queue function outside of the interrupt.
  5966. */
  5967. void handle_link_up(struct work_struct *work)
  5968. {
  5969. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5970. link_up_work);
  5971. struct hfi1_devdata *dd = ppd->dd;
  5972. set_link_state(ppd, HLS_UP_INIT);
  5973. /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
  5974. read_ltp_rtt(dd);
  5975. /*
  5976. * OPA specifies that certain counters are cleared on a transition
  5977. * to link up, so do that.
  5978. */
  5979. clear_linkup_counters(dd);
  5980. /*
  5981. * And (re)set link up default values.
  5982. */
  5983. set_linkup_defaults(ppd);
  5984. /*
  5985. * Set VL15 credits. Use cached value from verify cap interrupt.
 * In case of quick linkup or the simulator, the vl15 value will be set
 * by handle_linkup_change. The VerifyCap interrupt handler will not be
 * called in those scenarios.
  5989. */
  5990. if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
  5991. set_up_vl15(dd, dd->vl15buf_cached);
  5992. /* enforce link speed enabled */
  5993. if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
  5994. /* oops - current speed is not enabled, bounce */
  5995. dd_dev_err(dd,
  5996. "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
  5997. ppd->link_speed_active, ppd->link_speed_enabled);
  5998. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
  5999. OPA_LINKDOWN_REASON_SPEED_POLICY);
  6000. set_link_state(ppd, HLS_DN_OFFLINE);
  6001. start_link(ppd);
  6002. }
  6003. }
  6004. /*
  6005. * Several pieces of LNI information were cached for SMA in ppd.
  6006. * Reset these on link down
  6007. */
  6008. static void reset_neighbor_info(struct hfi1_pportdata *ppd)
  6009. {
  6010. ppd->neighbor_guid = 0;
  6011. ppd->neighbor_port_number = 0;
  6012. ppd->neighbor_type = 0;
  6013. ppd->neighbor_fm_security = 0;
  6014. }
  6015. static const char * const link_down_reason_strs[] = {
  6016. [OPA_LINKDOWN_REASON_NONE] = "None",
[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
  6018. [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
  6019. [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
  6020. [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
  6021. [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
  6022. [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
  6023. [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
  6024. [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
  6025. [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
  6026. [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
  6027. [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
  6028. [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
  6029. [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
  6030. [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
  6031. [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
  6032. [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
  6033. [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
  6034. [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
  6035. [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
  6036. [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
  6037. [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
  6038. [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
  6039. [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
  6040. [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
  6041. [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
  6042. [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
  6043. [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
  6044. [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
  6045. [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
  6046. [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
  6047. [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
  6048. [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
  6049. "Excessive buffer overrun",
  6050. [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
  6051. [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
  6052. [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
  6053. [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
  6054. [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
  6055. [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
  6056. [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
  6057. [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
  6058. "Local media not installed",
  6059. [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
  6060. [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
  6061. [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
  6062. "End to end not installed",
  6063. [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
  6064. [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
  6065. [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
  6066. [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
  6067. [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
  6068. [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
  6069. };
  6070. /* return the neighbor link down reason string */
  6071. static const char *link_down_reason_str(u8 reason)
  6072. {
  6073. const char *str = NULL;
  6074. if (reason < ARRAY_SIZE(link_down_reason_strs))
  6075. str = link_down_reason_strs[reason];
  6076. if (!str)
  6077. str = "(invalid)";
  6078. return str;
  6079. }
  6080. /*
  6081. * Handle a link down interrupt from the 8051.
  6082. *
  6083. * This is a work-queue function outside of the interrupt.
  6084. */
  6085. void handle_link_down(struct work_struct *work)
  6086. {
  6087. u8 lcl_reason, neigh_reason = 0;
  6088. u8 link_down_reason;
  6089. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6090. link_down_work);
  6091. int was_up;
  6092. static const char ldr_str[] = "Link down reason: ";
  6093. if ((ppd->host_link_state &
  6094. (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
  6095. ppd->port_type == PORT_TYPE_FIXED)
  6096. ppd->offline_disabled_reason =
  6097. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
  6098. /* Go offline first, then deal with reading/writing through 8051 */
  6099. was_up = !!(ppd->host_link_state & HLS_UP);
  6100. set_link_state(ppd, HLS_DN_OFFLINE);
  6101. if (was_up) {
  6102. lcl_reason = 0;
  6103. /* link down reason is only valid if the link was up */
  6104. read_link_down_reason(ppd->dd, &link_down_reason);
  6105. switch (link_down_reason) {
  6106. case LDR_LINK_TRANSFER_ACTIVE_LOW:
  6107. /* the link went down, no idle message reason */
  6108. dd_dev_info(ppd->dd, "%sUnexpected link down\n",
  6109. ldr_str);
  6110. break;
  6111. case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
  6112. /*
  6113. * The neighbor reason is only valid if an idle message
  6114. * was received for it.
  6115. */
  6116. read_planned_down_reason_code(ppd->dd, &neigh_reason);
  6117. dd_dev_info(ppd->dd,
  6118. "%sNeighbor link down message %d, %s\n",
  6119. ldr_str, neigh_reason,
  6120. link_down_reason_str(neigh_reason));
  6121. break;
  6122. case LDR_RECEIVED_HOST_OFFLINE_REQ:
  6123. dd_dev_info(ppd->dd,
  6124. "%sHost requested link to go offline\n",
  6125. ldr_str);
  6126. break;
  6127. default:
  6128. dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
  6129. ldr_str, link_down_reason);
  6130. break;
  6131. }
  6132. /*
  6133. * If no reason, assume peer-initiated but missed
  6134. * LinkGoingDown idle flits.
  6135. */
  6136. if (neigh_reason == 0)
  6137. lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
  6138. } else {
  6139. /* went down while polling or going up */
  6140. lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
  6141. }
  6142. set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
  6143. /* inform the SMA when the link transitions from up to down */
  6144. if (was_up && ppd->local_link_down_reason.sma == 0 &&
  6145. ppd->neigh_link_down_reason.sma == 0) {
  6146. ppd->local_link_down_reason.sma =
  6147. ppd->local_link_down_reason.latest;
  6148. ppd->neigh_link_down_reason.sma =
  6149. ppd->neigh_link_down_reason.latest;
  6150. }
  6151. reset_neighbor_info(ppd);
  6152. /* disable the port */
  6153. clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  6154. /*
  6155. * If there is no cable attached, turn the DC off. Otherwise,
  6156. * start the link bring up.
  6157. */
  6158. if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
  6159. dc_shutdown(ppd->dd);
  6160. else
  6161. start_link(ppd);
  6162. }
  6163. void handle_link_bounce(struct work_struct *work)
  6164. {
  6165. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6166. link_bounce_work);
  6167. /*
  6168. * Only do something if the link is currently up.
  6169. */
  6170. if (ppd->host_link_state & HLS_UP) {
  6171. set_link_state(ppd, HLS_DN_OFFLINE);
  6172. start_link(ppd);
  6173. } else {
  6174. dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
  6175. __func__, link_state_name(ppd->host_link_state));
  6176. }
  6177. }
  6178. /*
  6179. * Mask conversion: Capability exchange to Port LTP. The capability
  6180. * exchange has an implicit 16b CRC that is mandatory.
  6181. */
  6182. static int cap_to_port_ltp(int cap)
  6183. {
  6184. int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
  6185. if (cap & CAP_CRC_14B)
  6186. port_ltp |= PORT_LTP_CRC_MODE_14;
  6187. if (cap & CAP_CRC_48B)
  6188. port_ltp |= PORT_LTP_CRC_MODE_48;
  6189. if (cap & CAP_CRC_12B_16B_PER_LANE)
  6190. port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
  6191. return port_ltp;
  6192. }
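/* e.g. cap = CAP_CRC_14B | CAP_CRC_48B -> 16b, 14b, and 48b LTP modes set */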
  6193. /*
  6194. * Convert an OPA Port LTP mask to capability mask
  6195. */
  6196. int port_ltp_to_cap(int port_ltp)
  6197. {
  6198. int cap_mask = 0;
  6199. if (port_ltp & PORT_LTP_CRC_MODE_14)
  6200. cap_mask |= CAP_CRC_14B;
  6201. if (port_ltp & PORT_LTP_CRC_MODE_48)
  6202. cap_mask |= CAP_CRC_48B;
  6203. if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
  6204. cap_mask |= CAP_CRC_12B_16B_PER_LANE;
  6205. return cap_mask;
  6206. }
  6207. /*
  6208. * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
  6209. */
  6210. static int lcb_to_port_ltp(int lcb_crc)
  6211. {
  6212. int port_ltp = 0;
  6213. if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
  6214. port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
  6215. else if (lcb_crc == LCB_CRC_48B)
  6216. port_ltp = PORT_LTP_CRC_MODE_48;
  6217. else if (lcb_crc == LCB_CRC_14B)
  6218. port_ltp = PORT_LTP_CRC_MODE_14;
  6219. else
  6220. port_ltp = PORT_LTP_CRC_MODE_16;
  6221. return port_ltp;
  6222. }
  6223. /*
  6224. * Our neighbor has indicated that we are allowed to act as a fabric
 * manager, so place the full management partition key in pkey array
 * position 2 (0-based) (see OPAv1, section 20.2.2.6.8). Note
  6227. * that we should already have the limited management partition key in
  6228. * array element 1, and also that the port is not yet up when
  6229. * add_full_mgmt_pkey() is invoked.
  6230. */
  6231. static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
  6232. {
  6233. struct hfi1_devdata *dd = ppd->dd;
  6234. /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
  6235. if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
  6236. dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
  6237. __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
  6238. ppd->pkeys[2] = FULL_MGMT_P_KEY;
  6239. (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
  6240. hfi1_event_pkey_change(ppd->dd, ppd->port);
  6241. }
  6242. static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
  6243. {
  6244. if (ppd->pkeys[2] != 0) {
  6245. ppd->pkeys[2] = 0;
  6246. (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
  6247. hfi1_event_pkey_change(ppd->dd, ppd->port);
  6248. }
  6249. }
  6250. /*
  6251. * Convert the given link width to the OPA link width bitmask.
  6252. */
  6253. static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
  6254. {
  6255. switch (width) {
  6256. case 0:
  6257. /*
  6258. * Simulator and quick linkup do not set the width.
  6259. * Just set it to 4x without complaint.
  6260. */
  6261. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
  6262. return OPA_LINK_WIDTH_4X;
  6263. return 0; /* no lanes up */
  6264. case 1: return OPA_LINK_WIDTH_1X;
  6265. case 2: return OPA_LINK_WIDTH_2X;
  6266. case 3: return OPA_LINK_WIDTH_3X;
  6267. default:
  6268. dd_dev_info(dd, "%s: invalid width %d, using 4\n",
  6269. __func__, width);
  6270. /* fall through */
  6271. case 4: return OPA_LINK_WIDTH_4X;
  6272. }
  6273. }
  6274. /*
  6275. * Do a population count on the bottom nibble.
  6276. */
  6277. static const u8 bit_counts[16] = {
  6278. 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
  6279. };
  6280. static inline u8 nibble_to_count(u8 nibble)
  6281. {
  6282. return bit_counts[nibble & 0xf];
  6283. }
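/* e.g. a lane-enable nibble of 0xb (1011b) counts as 3 active lanes */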
  6284. /*
  6285. * Read the active lane information from the 8051 registers and return
  6286. * their widths.
  6287. *
  6288. * Active lane information is found in these 8051 registers:
  6289. * enable_lane_tx
  6290. * enable_lane_rx
  6291. */
  6292. static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
  6293. u16 *rx_width)
  6294. {
  6295. u16 tx, rx;
  6296. u8 enable_lane_rx;
  6297. u8 enable_lane_tx;
  6298. u8 tx_polarity_inversion;
  6299. u8 rx_polarity_inversion;
  6300. u8 max_rate;
  6301. /* read the active lanes */
  6302. read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
  6303. &rx_polarity_inversion, &max_rate);
  6304. read_local_lni(dd, &enable_lane_rx);
  6305. /* convert to counts */
  6306. tx = nibble_to_count(enable_lane_tx);
  6307. rx = nibble_to_count(enable_lane_rx);
  6308. /*
  6309. * Set link_speed_active here, overriding what was set in
  6310. * handle_verify_cap(). The ASIC 8051 firmware does not correctly
  6311. * set the max_rate field in handle_verify_cap until v0.19.
  6312. */
  6313. if ((dd->icode == ICODE_RTL_SILICON) &&
  6314. (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
  6315. /* max_rate: 0 = 12.5G, 1 = 25G */
  6316. switch (max_rate) {
  6317. case 0:
  6318. dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
  6319. break;
  6320. default:
  6321. dd_dev_err(dd,
  6322. "%s: unexpected max rate %d, using 25Gb\n",
  6323. __func__, (int)max_rate);
  6324. /* fall through */
  6325. case 1:
  6326. dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
  6327. break;
  6328. }
  6329. }
  6330. dd_dev_info(dd,
  6331. "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
  6332. enable_lane_tx, tx, enable_lane_rx, rx);
  6333. *tx_width = link_width_to_bits(dd, tx);
  6334. *rx_width = link_width_to_bits(dd, rx);
  6335. }
  6336. /*
  6337. * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
  6338. * Valid after the end of VerifyCap and during LinkUp. Does not change
  6339. * after link up. I.e. look elsewhere for downgrade information.
  6340. *
  6341. * Bits are:
  6342. * + bits [7:4] contain the number of active transmitters
  6343. * + bits [3:0] contain the number of active receivers
  6344. * These are numbers 1 through 4 and can be different values if the
  6345. * link is asymmetric.
  6346. *
  6347. * verify_cap_local_fm_link_width[0] retains its original value.
  6348. */
  6349. static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
  6350. u16 *rx_width)
  6351. {
  6352. u16 widths, tx, rx;
  6353. u8 misc_bits, local_flags;
  6354. u16 active_tx, active_rx;
  6355. read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
  6356. tx = widths >> 12;
  6357. rx = (widths >> 8) & 0xf;
  6358. *tx_width = link_width_to_bits(dd, tx);
  6359. *rx_width = link_width_to_bits(dd, rx);
  6360. /* print the active widths */
  6361. get_link_widths(dd, &active_tx, &active_rx);
  6362. }
  6363. /*
  6364. * Set ppd->link_width_active and ppd->link_width_downgrade_active using
  6365. * hardware information when the link first comes up.
  6366. *
  6367. * The link width is not available until after VerifyCap.AllFramesReceived
  6368. * (the trigger for handle_verify_cap), so this is outside that routine
  6369. * and should be called when the 8051 signals linkup.
  6370. */
  6371. void get_linkup_link_widths(struct hfi1_pportdata *ppd)
  6372. {
  6373. u16 tx_width, rx_width;
  6374. /* get end-of-LNI link widths */
  6375. get_linkup_widths(ppd->dd, &tx_width, &rx_width);
  6376. /* use tx_width as the link is supposed to be symmetric on link up */
  6377. ppd->link_width_active = tx_width;
  6378. /* link width downgrade active (LWD.A) starts out matching LW.A */
  6379. ppd->link_width_downgrade_tx_active = ppd->link_width_active;
  6380. ppd->link_width_downgrade_rx_active = ppd->link_width_active;
  6381. /* per OPA spec, on link up LWD.E resets to LWD.S */
  6382. ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
/* cache the active egress rate (units of 10^6 bits/sec) */
  6384. ppd->current_egress_rate = active_egress_rate(ppd);
  6385. }
  6386. /*
  6387. * Handle a verify capabilities interrupt from the 8051.
  6388. *
  6389. * This is a work-queue function outside of the interrupt.
  6390. */
  6391. void handle_verify_cap(struct work_struct *work)
  6392. {
  6393. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6394. link_vc_work);
  6395. struct hfi1_devdata *dd = ppd->dd;
  6396. u64 reg;
  6397. u8 power_management;
u8 continuous;
  6399. u8 vcu;
  6400. u8 vau;
  6401. u8 z;
  6402. u16 vl15buf;
  6403. u16 link_widths;
  6404. u16 crc_mask;
  6405. u16 crc_val;
  6406. u16 device_id;
  6407. u16 active_tx, active_rx;
  6408. u8 partner_supported_crc;
  6409. u8 remote_tx_rate;
  6410. u8 device_rev;
  6411. set_link_state(ppd, HLS_VERIFY_CAP);
  6412. lcb_shutdown(dd, 0);
  6413. adjust_lcb_for_fpga_serdes(dd);
read_vc_remote_phy(dd, &power_management, &continuous);
  6415. read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
  6416. &partner_supported_crc);
  6417. read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
  6418. read_remote_device_id(dd, &device_id, &device_rev);
/*
 * The 'MgmtAllowed' information, which is exchanged during
 * LNI, is also available at this point.
 */
  6423. read_mgmt_allowed(dd, &ppd->mgmt_allowed);
  6424. /* print the active widths */
  6425. get_link_widths(dd, &active_tx, &active_rx);
  6426. dd_dev_info(dd,
  6427. "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
(int)power_management, (int)continuous);
  6429. dd_dev_info(dd,
  6430. "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
  6431. (int)vau, (int)z, (int)vcu, (int)vl15buf,
  6432. (int)partner_supported_crc);
  6433. dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
  6434. (u32)remote_tx_rate, (u32)link_widths);
  6435. dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
  6436. (u32)device_id, (u32)device_rev);
  6437. /*
  6438. * The peer vAU value just read is the peer receiver value. HFI does
  6439. * not support a transmit vAU of 0 (AU == 8). We advertised that
  6440. * with Z=1 in the fabric capabilities sent to the peer. The peer
  6441. * will see our Z=1, and, if it advertised a vAU of 0, will move its
  6442. * receive to vAU of 1 (AU == 16). Do the same here. We do not care
  6443. * about the peer Z value - our sent vAU is 3 (hardwired) and is not
  6444. * subject to the Z value exception.
  6445. */
  6446. if (vau == 0)
  6447. vau = 1;
  6448. set_up_vau(dd, vau);
/*
 * Set VL15 credits to 0 in the global credit register. Cache the remote
 * VL15 credit value and wait for the link-up interrupt to set it.
 */
  6453. set_up_vl15(dd, 0);
  6454. dd->vl15buf_cached = vl15buf;
  6455. /* set up the LCB CRC mode */
  6456. crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
  6457. /* order is important: use the lowest bit in common */
  6458. if (crc_mask & CAP_CRC_14B)
  6459. crc_val = LCB_CRC_14B;
  6460. else if (crc_mask & CAP_CRC_48B)
  6461. crc_val = LCB_CRC_48B;
  6462. else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
  6463. crc_val = LCB_CRC_12B_16B_PER_LANE;
  6464. else
  6465. crc_val = LCB_CRC_16B;
  6466. dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
  6467. write_csr(dd, DC_LCB_CFG_CRC_MODE,
  6468. (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
  6469. /* set (14b only) or clear sideband credit */
  6470. reg = read_csr(dd, SEND_CM_CTRL);
  6471. if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
  6472. write_csr(dd, SEND_CM_CTRL,
  6473. reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
  6474. } else {
  6475. write_csr(dd, SEND_CM_CTRL,
  6476. reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
  6477. }
  6478. ppd->link_speed_active = 0; /* invalid value */
  6479. if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
  6480. /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
  6481. switch (remote_tx_rate) {
  6482. case 0:
  6483. ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
  6484. break;
  6485. case 1:
  6486. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6487. break;
  6488. }
  6489. } else {
  6490. /* actual rate is highest bit of the ANDed rates */
  6491. u8 rate = remote_tx_rate & ppd->local_tx_rate;
  6492. if (rate & 2)
  6493. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6494. else if (rate & 1)
  6495. ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
  6496. }
  6497. if (ppd->link_speed_active == 0) {
  6498. dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
  6499. __func__, (int)remote_tx_rate);
  6500. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6501. }
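	/*
	 * Worked example of the rate negotiation above (values assumed for
	 * illustration): with newer 8051 firmware each side advertises a
	 * bit-per-rate mask, bit 0 = 12.5G and bit 1 = 25G.  If both ends
	 * advertise 0x3, the ANDed rate is 0x3 and bit 1 selects
	 * OPA_LINK_SPEED_25G; against a 12.5G-only peer advertising 0x1,
	 * the AND is 0x1 and OPA_LINK_SPEED_12_5G is chosen instead.
	 */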
  6502. /*
  6503. * Cache the values of the supported, enabled, and active
  6504. * LTP CRC modes to return in 'portinfo' queries. But the bit
  6505. * flags that are returned in the portinfo query differ from
  6506. * what's in the link_crc_mask, crc_sizes, and crc_val
  6507. * variables. Convert these here.
  6508. */
  6509. ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
  6510. /* supported crc modes */
  6511. ppd->port_ltp_crc_mode |=
  6512. cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
  6513. /* enabled crc modes */
  6514. ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
  6515. /* active crc mode */
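	/*
	 * Resulting layout, assuming each port-LTP mask fits in four bits
	 * (as implied by the shifts above): supported modes land in bits
	 * 11:8, enabled modes in bits 7:4, and the single active mode in
	 * bits 3:0 of port_ltp_crc_mode.
	 */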
  6516. /* set up the remote credit return table */
  6517. assign_remote_cm_au_table(dd, vcu);
  6518. /*
  6519. * The LCB is reset on entry to handle_verify_cap(), so this must
  6520. * be applied on every link up.
  6521. *
  6522. * Adjust LCB error kill enable to kill the link if
  6523. * these RBUF errors are seen:
  6524. * REPLAY_BUF_MBE_SMASK
  6525. * FLIT_INPUT_BUF_MBE_SMASK
  6526. */
  6527. if (is_ax(dd)) { /* fixed in B0 */
  6528. reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
  6529. reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
  6530. | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
  6531. write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
  6532. }
  6533. /* pull LCB fifos out of reset - all fifo clocks must be stable */
  6534. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  6535. /* give 8051 access to the LCB CSRs */
  6536. write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
  6537. set_8051_lcb_access(dd);
  6538. if (ppd->mgmt_allowed)
  6539. add_full_mgmt_pkey(ppd);
  6540. /* tell the 8051 to go to LinkUp */
  6541. set_link_state(ppd, HLS_GOING_UP);
  6542. }
  6543. /*
  6544. * Apply the link width downgrade enabled policy against the current active
  6545. * link widths.
  6546. *
  6547. * Called when the enabled policy changes or the active link widths change.
  6548. */
  6549. void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
  6550. {
  6551. int do_bounce = 0;
  6552. int tries;
  6553. u16 lwde;
  6554. u16 tx, rx;
  6555. /* use the hls lock to avoid a race with actual link up */
  6556. tries = 0;
  6557. retry:
  6558. mutex_lock(&ppd->hls_lock);
  6559. /* only apply if the link is up */
  6560. if (ppd->host_link_state & HLS_DOWN) {
  6561. /* still going up..wait and retry */
  6562. if (ppd->host_link_state & HLS_GOING_UP) {
  6563. if (++tries < 1000) {
  6564. mutex_unlock(&ppd->hls_lock);
  6565. usleep_range(100, 120); /* arbitrary */
  6566. goto retry;
  6567. }
  6568. dd_dev_err(ppd->dd,
  6569. "%s: giving up waiting for link state change\n",
  6570. __func__);
  6571. }
  6572. goto done;
  6573. }
  6574. lwde = ppd->link_width_downgrade_enabled;
  6575. if (refresh_widths) {
  6576. get_link_widths(ppd->dd, &tx, &rx);
  6577. ppd->link_width_downgrade_tx_active = tx;
  6578. ppd->link_width_downgrade_rx_active = rx;
  6579. }
  6580. if (ppd->link_width_downgrade_tx_active == 0 ||
  6581. ppd->link_width_downgrade_rx_active == 0) {
  6582. /* the 8051 reported a dead link as a downgrade */
  6583. dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
  6584. } else if (lwde == 0) {
  6585. /* downgrade is disabled */
  6586. /* bounce if not at starting active width */
  6587. if ((ppd->link_width_active !=
  6588. ppd->link_width_downgrade_tx_active) ||
  6589. (ppd->link_width_active !=
  6590. ppd->link_width_downgrade_rx_active)) {
  6591. dd_dev_err(ppd->dd,
  6592. "Link downgrade is disabled and link has downgraded, downing link\n");
  6593. dd_dev_err(ppd->dd,
  6594. " original 0x%x, tx active 0x%x, rx active 0x%x\n",
  6595. ppd->link_width_active,
  6596. ppd->link_width_downgrade_tx_active,
  6597. ppd->link_width_downgrade_rx_active);
  6598. do_bounce = 1;
  6599. }
  6600. } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
  6601. (lwde & ppd->link_width_downgrade_rx_active) == 0) {
  6602. /* Tx or Rx is outside the enabled policy */
  6603. dd_dev_err(ppd->dd,
  6604. "Link is outside of downgrade allowed, downing link\n");
  6605. dd_dev_err(ppd->dd,
  6606. " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
  6607. lwde, ppd->link_width_downgrade_tx_active,
  6608. ppd->link_width_downgrade_rx_active);
  6609. do_bounce = 1;
  6610. }
  6611. done:
  6612. mutex_unlock(&ppd->hls_lock);
  6613. if (do_bounce) {
  6614. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
  6615. OPA_LINKDOWN_REASON_WIDTH_POLICY);
  6616. set_link_state(ppd, HLS_DN_OFFLINE);
  6617. start_link(ppd);
  6618. }
  6619. }
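/*
 * Minimal sketch of the downgrade policy decision made above.  This is
 * illustrative only and not used by the driver; the helper and parameter
 * names are hypothetical.  Given the enabled-downgrade mask (lwde), the
 * width the link came up at, and the active tx/rx downgrade widths, it
 * returns non-zero when the link should be bounced.
 */
static inline int lwd_policy_violated_sketch(u16 lwde, u16 starting_width,
					     u16 tx_active, u16 rx_active)
{
	if (tx_active == 0 || rx_active == 0)
		return 0;	/* really a link down, not a downgrade */
	if (lwde == 0)		/* downgrade disabled: any change bounces */
		return tx_active != starting_width ||
		       rx_active != starting_width;
	/* downgrade enabled: both active widths must stay inside policy */
	return !(lwde & tx_active) || !(lwde & rx_active);
}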
  6620. /*
  6621. * Handle a link downgrade interrupt from the 8051.
  6622. *
  6623. * This is a work-queue function outside of the interrupt.
  6624. */
  6625. void handle_link_downgrade(struct work_struct *work)
  6626. {
  6627. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6628. link_downgrade_work);
  6629. dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
  6630. apply_link_downgrade_policy(ppd, 1);
  6631. }
  6632. static char *dcc_err_string(char *buf, int buf_len, u64 flags)
  6633. {
  6634. return flag_string(buf, buf_len, flags, dcc_err_flags,
  6635. ARRAY_SIZE(dcc_err_flags));
  6636. }
  6637. static char *lcb_err_string(char *buf, int buf_len, u64 flags)
  6638. {
  6639. return flag_string(buf, buf_len, flags, lcb_err_flags,
  6640. ARRAY_SIZE(lcb_err_flags));
  6641. }
  6642. static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
  6643. {
  6644. return flag_string(buf, buf_len, flags, dc8051_err_flags,
  6645. ARRAY_SIZE(dc8051_err_flags));
  6646. }
  6647. static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
  6648. {
  6649. return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
  6650. ARRAY_SIZE(dc8051_info_err_flags));
  6651. }
  6652. static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
  6653. {
  6654. return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
  6655. ARRAY_SIZE(dc8051_info_host_msg_flags));
  6656. }
  6657. static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6658. {
  6659. struct hfi1_pportdata *ppd = dd->pport;
  6660. u64 info, err, host_msg;
  6661. int queue_link_down = 0;
  6662. char buf[96];
  6663. /* look at the flags */
  6664. if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
  6665. /* 8051 information set by firmware */
  6666. /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
  6667. info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
  6668. err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
  6669. & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
  6670. host_msg = (info >>
  6671. DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
  6672. & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
  6673. /*
  6674. * Handle error flags.
  6675. */
  6676. if (err & FAILED_LNI) {
  6677. /*
  6678. * LNI error indications are cleared by the 8051
  6679. * only when starting polling. Only pay attention
  6680. * to them when in the states that occur during
  6681. * LNI.
  6682. */
  6683. if (ppd->host_link_state
  6684. & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
  6685. queue_link_down = 1;
  6686. dd_dev_info(dd, "Link error: %s\n",
  6687. dc8051_info_err_string(buf,
  6688. sizeof(buf),
  6689. err &
  6690. FAILED_LNI));
  6691. }
  6692. err &= ~(u64)FAILED_LNI;
  6693. }
6694. /* unknown frames can happen during LNI, just count */
  6695. if (err & UNKNOWN_FRAME) {
  6696. ppd->unknown_frame_count++;
  6697. err &= ~(u64)UNKNOWN_FRAME;
  6698. }
  6699. if (err) {
  6700. /* report remaining errors, but do not do anything */
  6701. dd_dev_err(dd, "8051 info error: %s\n",
  6702. dc8051_info_err_string(buf, sizeof(buf),
  6703. err));
  6704. }
  6705. /*
  6706. * Handle host message flags.
  6707. */
  6708. if (host_msg & HOST_REQ_DONE) {
  6709. /*
  6710. * Presently, the driver does a busy wait for
  6711. * host requests to complete. This is only an
  6712. * informational message.
  6713. * NOTE: The 8051 clears the host message
  6714. * information *on the next 8051 command*.
  6715. * Therefore, when linkup is achieved,
  6716. * this flag will still be set.
  6717. */
  6718. host_msg &= ~(u64)HOST_REQ_DONE;
  6719. }
  6720. if (host_msg & BC_SMA_MSG) {
  6721. queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
  6722. host_msg &= ~(u64)BC_SMA_MSG;
  6723. }
  6724. if (host_msg & LINKUP_ACHIEVED) {
  6725. dd_dev_info(dd, "8051: Link up\n");
  6726. queue_work(ppd->hfi1_wq, &ppd->link_up_work);
  6727. host_msg &= ~(u64)LINKUP_ACHIEVED;
  6728. }
  6729. if (host_msg & EXT_DEVICE_CFG_REQ) {
  6730. handle_8051_request(ppd);
  6731. host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
  6732. }
  6733. if (host_msg & VERIFY_CAP_FRAME) {
  6734. queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
  6735. host_msg &= ~(u64)VERIFY_CAP_FRAME;
  6736. }
  6737. if (host_msg & LINK_GOING_DOWN) {
  6738. const char *extra = "";
  6739. /* no downgrade action needed if going down */
  6740. if (host_msg & LINK_WIDTH_DOWNGRADED) {
  6741. host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
  6742. extra = " (ignoring downgrade)";
  6743. }
  6744. dd_dev_info(dd, "8051: Link down%s\n", extra);
  6745. queue_link_down = 1;
  6746. host_msg &= ~(u64)LINK_GOING_DOWN;
  6747. }
  6748. if (host_msg & LINK_WIDTH_DOWNGRADED) {
  6749. queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
  6750. host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
  6751. }
  6752. if (host_msg) {
  6753. /* report remaining messages, but do not do anything */
  6754. dd_dev_info(dd, "8051 info host message: %s\n",
  6755. dc8051_info_host_msg_string(buf,
  6756. sizeof(buf),
  6757. host_msg));
  6758. }
  6759. reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
  6760. }
  6761. if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
  6762. /*
  6763. * Lost the 8051 heartbeat. If this happens, we
  6764. * receive constant interrupts about it. Disable
  6765. * the interrupt after the first.
  6766. */
  6767. dd_dev_err(dd, "Lost 8051 heartbeat\n");
  6768. write_csr(dd, DC_DC8051_ERR_EN,
  6769. read_csr(dd, DC_DC8051_ERR_EN) &
  6770. ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
  6771. reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
  6772. }
  6773. if (reg) {
  6774. /* report the error, but do not do anything */
  6775. dd_dev_err(dd, "8051 error: %s\n",
  6776. dc8051_err_string(buf, sizeof(buf), reg));
  6777. }
  6778. if (queue_link_down) {
  6779. /*
  6780. * if the link is already going down or disabled, do not
  6781. * queue another
  6782. */
  6783. if ((ppd->host_link_state &
  6784. (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
  6785. ppd->link_enabled == 0) {
  6786. dd_dev_info(dd, "%s: not queuing link down\n",
  6787. __func__);
  6788. } else {
  6789. queue_work(ppd->hfi1_wq, &ppd->link_down_work);
  6790. }
  6791. }
  6792. }
  6793. static const char * const fm_config_txt[] = {
  6794. [0] =
  6795. "BadHeadDist: Distance violation between two head flits",
  6796. [1] =
  6797. "BadTailDist: Distance violation between two tail flits",
  6798. [2] =
  6799. "BadCtrlDist: Distance violation between two credit control flits",
  6800. [3] =
  6801. "BadCrdAck: Credits return for unsupported VL",
  6802. [4] =
  6803. "UnsupportedVLMarker: Received VL Marker",
  6804. [5] =
  6805. "BadPreempt: Exceeded the preemption nesting level",
  6806. [6] =
  6807. "BadControlFlit: Received unsupported control flit",
  6808. /* no 7 */
  6809. [8] =
  6810. "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
  6811. };
  6812. static const char * const port_rcv_txt[] = {
  6813. [1] =
  6814. "BadPktLen: Illegal PktLen",
  6815. [2] =
  6816. "PktLenTooLong: Packet longer than PktLen",
  6817. [3] =
  6818. "PktLenTooShort: Packet shorter than PktLen",
  6819. [4] =
  6820. "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
  6821. [5] =
  6822. "BadDLID: Illegal DLID (0, doesn't match HFI)",
  6823. [6] =
  6824. "BadL2: Illegal L2 opcode",
  6825. [7] =
  6826. "BadSC: Unsupported SC",
  6827. [9] =
  6828. "BadRC: Illegal RC",
  6829. [11] =
  6830. "PreemptError: Preempting with same VL",
  6831. [12] =
  6832. "PreemptVL15: Preempting a VL15 packet",
  6833. };
  6834. #define OPA_LDR_FMCONFIG_OFFSET 16
  6835. #define OPA_LDR_PORTRCV_OFFSET 0
  6836. static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6837. {
  6838. u64 info, hdr0, hdr1;
  6839. const char *extra;
  6840. char buf[96];
  6841. struct hfi1_pportdata *ppd = dd->pport;
  6842. u8 lcl_reason = 0;
  6843. int do_bounce = 0;
  6844. if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
  6845. if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
  6846. info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
  6847. dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
  6848. /* set status bit */
  6849. dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
  6850. }
  6851. reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
  6852. }
  6853. if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
  6854. struct hfi1_pportdata *ppd = dd->pport;
  6855. /* this counter saturates at (2^32) - 1 */
  6856. if (ppd->link_downed < (u32)UINT_MAX)
  6857. ppd->link_downed++;
  6858. reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
  6859. }
  6860. if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
  6861. u8 reason_valid = 1;
  6862. info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
  6863. if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
  6864. dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
  6865. /* set status bit */
  6866. dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
  6867. }
  6868. switch (info) {
  6869. case 0:
  6870. case 1:
  6871. case 2:
  6872. case 3:
  6873. case 4:
  6874. case 5:
  6875. case 6:
  6876. extra = fm_config_txt[info];
  6877. break;
  6878. case 8:
  6879. extra = fm_config_txt[info];
  6880. if (ppd->port_error_action &
  6881. OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
  6882. do_bounce = 1;
  6883. /*
  6884. * lcl_reason cannot be derived from info
  6885. * for this error
  6886. */
  6887. lcl_reason =
  6888. OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
  6889. }
  6890. break;
  6891. default:
  6892. reason_valid = 0;
  6893. snprintf(buf, sizeof(buf), "reserved%lld", info);
  6894. extra = buf;
  6895. break;
  6896. }
  6897. if (reason_valid && !do_bounce) {
  6898. do_bounce = ppd->port_error_action &
  6899. (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
  6900. lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
  6901. }
  6902. /* just report this */
  6903. dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
  6904. extra);
  6905. reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
  6906. }
  6907. if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
  6908. u8 reason_valid = 1;
  6909. info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
  6910. hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
  6911. hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
  6912. if (!(dd->err_info_rcvport.status_and_code &
  6913. OPA_EI_STATUS_SMASK)) {
  6914. dd->err_info_rcvport.status_and_code =
  6915. info & OPA_EI_CODE_SMASK;
  6916. /* set status bit */
  6917. dd->err_info_rcvport.status_and_code |=
  6918. OPA_EI_STATUS_SMASK;
  6919. /*
  6920. * save first 2 flits in the packet that caused
  6921. * the error
  6922. */
  6923. dd->err_info_rcvport.packet_flit1 = hdr0;
  6924. dd->err_info_rcvport.packet_flit2 = hdr1;
  6925. }
  6926. switch (info) {
  6927. case 1:
  6928. case 2:
  6929. case 3:
  6930. case 4:
  6931. case 5:
  6932. case 6:
  6933. case 7:
  6934. case 9:
  6935. case 11:
  6936. case 12:
  6937. extra = port_rcv_txt[info];
  6938. break;
  6939. default:
  6940. reason_valid = 0;
  6941. snprintf(buf, sizeof(buf), "reserved%lld", info);
  6942. extra = buf;
  6943. break;
  6944. }
  6945. if (reason_valid && !do_bounce) {
  6946. do_bounce = ppd->port_error_action &
  6947. (1 << (OPA_LDR_PORTRCV_OFFSET + info));
  6948. lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
  6949. }
  6950. /* just report this */
  6951. dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
  6952. " hdr0 0x%llx, hdr1 0x%llx\n",
  6953. extra, hdr0, hdr1);
  6954. reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
  6955. }
  6956. if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
  6957. /* informative only */
  6958. dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
  6959. reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
  6960. }
  6961. if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
  6962. /* informative only */
  6963. dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
  6964. reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
  6965. }
  6966. if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
  6967. reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
  6968. /* report any remaining errors */
  6969. if (reg)
  6970. dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
  6971. dcc_err_string(buf, sizeof(buf), reg));
  6972. if (lcl_reason == 0)
  6973. lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
  6974. if (do_bounce) {
  6975. dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
  6976. __func__);
  6977. set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
  6978. queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
  6979. }
  6980. }
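/*
 * Sketch of the PortErrorAction test used above for both the fmconfig
 * and port_rcv cases (illustrative only, not used by the driver; names
 * are hypothetical).  The FM programs a bit mask and the error-info code
 * selects one bit inside the fmconfig or portrcv window of that mask;
 * 'offset' is OPA_LDR_FMCONFIG_OFFSET or OPA_LDR_PORTRCV_OFFSET.
 */
static inline int port_error_action_bounce_sketch(u32 port_error_action,
						  u32 offset, u64 err_code)
{
	return !!(port_error_action & (1u << (offset + (u32)err_code)));
}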
  6981. static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6982. {
  6983. char buf[96];
  6984. dd_dev_info(dd, "LCB Error: %s\n",
  6985. lcb_err_string(buf, sizeof(buf), reg));
  6986. }
  6987. /*
  6988. * CCE block DC interrupt. Source is < 8.
  6989. */
  6990. static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
  6991. {
  6992. const struct err_reg_info *eri = &dc_errs[source];
  6993. if (eri->handler) {
  6994. interrupt_clear_down(dd, 0, eri);
  6995. } else if (source == 3 /* dc_lbm_int */) {
  6996. /*
  6997. * This indicates that a parity error has occurred on the
  6998. * address/control lines presented to the LBM. The error
  6999. * is a single pulse, there is no associated error flag,
  7000. * and it is non-maskable. This is because if a parity
7001. * error occurs on the request, the request is dropped.
  7002. * This should never occur, but it is nice to know if it
  7003. * ever does.
  7004. */
  7005. dd_dev_err(dd, "Parity error in DC LBM block\n");
  7006. } else {
  7007. dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
  7008. }
  7009. }
  7010. /*
  7011. * TX block send credit interrupt. Source is < 160.
  7012. */
  7013. static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
  7014. {
  7015. sc_group_release_update(dd, source);
  7016. }
  7017. /*
  7018. * TX block SDMA interrupt. Source is < 48.
  7019. *
  7020. * SDMA interrupts are grouped by type:
  7021. *
  7022. * 0 - N-1 = SDma
  7023. * N - 2N-1 = SDmaProgress
  7024. * 2N - 3N-1 = SDmaIdle
  7025. */
  7026. static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
  7027. {
  7028. /* what interrupt */
  7029. unsigned int what = source / TXE_NUM_SDMA_ENGINES;
  7030. /* which engine */
  7031. unsigned int which = source % TXE_NUM_SDMA_ENGINES;
  7032. #ifdef CONFIG_SDMA_VERBOSITY
  7033. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
  7034. slashstrip(__FILE__), __LINE__, __func__);
  7035. sdma_dumpstate(&dd->per_sdma[which]);
  7036. #endif
  7037. if (likely(what < 3 && which < dd->num_sdma)) {
  7038. sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
  7039. } else {
  7040. /* should not happen */
  7041. dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
  7042. }
  7043. }
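/*
 * Sketch of the SDMA source decoding above (illustrative helper, not
 * used by the driver; names are hypothetical).  Assuming the usual
 * TXE_NUM_SDMA_ENGINES grouping of 16 engines, source 21 decodes to
 * what = 1 (SDmaProgress) on engine which = 5.
 */
static inline void sdma_source_decode_sketch(unsigned int source,
					     unsigned int *what,
					     unsigned int *which)
{
	*what = source / TXE_NUM_SDMA_ENGINES;	/* interrupt type group */
	*which = source % TXE_NUM_SDMA_ENGINES;	/* engine within group */
}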
  7044. /*
  7045. * RX block receive available interrupt. Source is < 160.
  7046. */
  7047. static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
  7048. {
  7049. struct hfi1_ctxtdata *rcd;
  7050. char *err_detail;
  7051. if (likely(source < dd->num_rcv_contexts)) {
  7052. rcd = dd->rcd[source];
  7053. if (rcd) {
  7054. /* Check for non-user contexts, including vnic */
  7055. if ((source < dd->first_dyn_alloc_ctxt) ||
  7056. (rcd->sc && (rcd->sc->type == SC_KERNEL)))
  7057. rcd->do_interrupt(rcd, 0);
  7058. else
  7059. handle_user_interrupt(rcd);
  7060. return; /* OK */
  7061. }
  7062. /* received an interrupt, but no rcd */
  7063. err_detail = "dataless";
  7064. } else {
  7065. /* received an interrupt, but are not using that context */
  7066. err_detail = "out of range";
  7067. }
  7068. dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
  7069. err_detail, source);
  7070. }
  7071. /*
  7072. * RX block receive urgent interrupt. Source is < 160.
  7073. */
  7074. static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
  7075. {
  7076. struct hfi1_ctxtdata *rcd;
  7077. char *err_detail;
  7078. if (likely(source < dd->num_rcv_contexts)) {
  7079. rcd = dd->rcd[source];
  7080. if (rcd) {
  7081. /* only pay attention to user urgent interrupts */
  7082. if ((source >= dd->first_dyn_alloc_ctxt) &&
  7083. (!rcd->sc || (rcd->sc->type == SC_USER)))
  7084. handle_user_interrupt(rcd);
  7085. return; /* OK */
  7086. }
  7087. /* received an interrupt, but no rcd */
  7088. err_detail = "dataless";
  7089. } else {
  7090. /* received an interrupt, but are not using that context */
  7091. err_detail = "out of range";
  7092. }
  7093. dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
  7094. err_detail, source);
  7095. }
  7096. /*
  7097. * Reserved range interrupt. Should not be called in normal operation.
  7098. */
  7099. static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
  7100. {
  7101. char name[64];
  7102. dd_dev_err(dd, "unexpected %s interrupt\n",
  7103. is_reserved_name(name, sizeof(name), source));
  7104. }
  7105. static const struct is_table is_table[] = {
  7106. /*
  7107. * start end
  7108. * name func interrupt func
  7109. */
  7110. { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
  7111. is_misc_err_name, is_misc_err_int },
  7112. { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
  7113. is_sdma_eng_err_name, is_sdma_eng_err_int },
  7114. { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
  7115. is_sendctxt_err_name, is_sendctxt_err_int },
  7116. { IS_SDMA_START, IS_SDMA_END,
  7117. is_sdma_eng_name, is_sdma_eng_int },
  7118. { IS_VARIOUS_START, IS_VARIOUS_END,
  7119. is_various_name, is_various_int },
  7120. { IS_DC_START, IS_DC_END,
  7121. is_dc_name, is_dc_int },
  7122. { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
  7123. is_rcv_avail_name, is_rcv_avail_int },
  7124. { IS_RCVURGENT_START, IS_RCVURGENT_END,
  7125. is_rcv_urgent_name, is_rcv_urgent_int },
  7126. { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
  7127. is_send_credit_name, is_send_credit_int},
  7128. { IS_RESERVED_START, IS_RESERVED_END,
  7129. is_reserved_name, is_reserved_int},
  7130. };
  7131. /*
  7132. * Interrupt source interrupt - called when the given source has an interrupt.
  7133. * Source is a bit index into an array of 64-bit integers.
  7134. */
  7135. static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
  7136. {
  7137. const struct is_table *entry;
  7138. /* avoids a double compare by walking the table in-order */
  7139. for (entry = &is_table[0]; entry->is_name; entry++) {
  7140. if (source < entry->end) {
  7141. trace_hfi1_interrupt(dd, entry, source);
  7142. entry->is_int(dd, source - entry->start);
  7143. return;
  7144. }
  7145. }
  7146. /* fell off the end */
  7147. dd_dev_err(dd, "invalid interrupt source %u\n", source);
  7148. }
  7149. /*
  7150. * General interrupt handler. This is able to correctly handle
  7151. * all interrupts in case INTx is used.
  7152. */
  7153. static irqreturn_t general_interrupt(int irq, void *data)
  7154. {
  7155. struct hfi1_devdata *dd = data;
  7156. u64 regs[CCE_NUM_INT_CSRS];
  7157. u32 bit;
  7158. int i;
  7159. this_cpu_inc(*dd->int_counter);
  7160. /* phase 1: scan and clear all handled interrupts */
  7161. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  7162. if (dd->gi_mask[i] == 0) {
  7163. regs[i] = 0; /* used later */
  7164. continue;
  7165. }
  7166. regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
  7167. dd->gi_mask[i];
  7168. /* only clear if anything is set */
  7169. if (regs[i])
  7170. write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
  7171. }
  7172. /* phase 2: call the appropriate handler */
  7173. for_each_set_bit(bit, (unsigned long *)&regs[0],
  7174. CCE_NUM_INT_CSRS * 64) {
  7175. is_interrupt(dd, bit);
  7176. }
  7177. return IRQ_HANDLED;
  7178. }
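/*
 * Worked example of the bit-to-source mapping used in phase 2 above:
 * regs[] is treated as one long bitmap, so a set bit at overall position
 * 'bit' came from CCE_INT_STATUS CSR index bit/64, bit position bit%64,
 * and 'bit' itself is passed to is_interrupt() as the source number.
 */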
  7179. static irqreturn_t sdma_interrupt(int irq, void *data)
  7180. {
  7181. struct sdma_engine *sde = data;
  7182. struct hfi1_devdata *dd = sde->dd;
  7183. u64 status;
  7184. #ifdef CONFIG_SDMA_VERBOSITY
  7185. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  7186. slashstrip(__FILE__), __LINE__, __func__);
  7187. sdma_dumpstate(sde);
  7188. #endif
  7189. this_cpu_inc(*dd->int_counter);
  7190. /* This read_csr is really bad in the hot path */
  7191. status = read_csr(dd,
  7192. CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
  7193. & sde->imask;
  7194. if (likely(status)) {
  7195. /* clear the interrupt(s) */
  7196. write_csr(dd,
  7197. CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
  7198. status);
  7199. /* handle the interrupt(s) */
  7200. sdma_engine_interrupt(sde, status);
  7201. } else {
  7202. dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
  7203. sde->this_idx);
  7204. }
  7205. return IRQ_HANDLED;
  7206. }
  7207. /*
  7208. * Clear the receive interrupt. Use a read of the interrupt clear CSR
7209. * to ensure that the write completed. This does NOT guarantee that
  7210. * queued DMA writes to memory from the chip are pushed.
  7211. */
  7212. static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
  7213. {
  7214. struct hfi1_devdata *dd = rcd->dd;
  7215. u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
  7216. mmiowb(); /* make sure everything before is written */
  7217. write_csr(dd, addr, rcd->imask);
  7218. /* force the above write on the chip and get a value back */
  7219. (void)read_csr(dd, addr);
  7220. }
  7221. /* force the receive interrupt */
  7222. void force_recv_intr(struct hfi1_ctxtdata *rcd)
  7223. {
  7224. write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
  7225. }
  7226. /*
  7227. * Return non-zero if a packet is present.
  7228. *
  7229. * This routine is called when rechecking for packets after the RcvAvail
  7230. * interrupt has been cleared down. First, do a quick check of memory for
  7231. * a packet present. If not found, use an expensive CSR read of the context
  7232. * tail to determine the actual tail. The CSR read is necessary because there
  7233. * is no method to push pending DMAs to memory other than an interrupt and we
  7234. * are trying to determine if we need to force an interrupt.
  7235. */
  7236. static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
  7237. {
  7238. u32 tail;
  7239. int present;
  7240. if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
  7241. present = (rcd->seq_cnt ==
  7242. rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
  7243. else /* is RDMA rtail */
  7244. present = (rcd->head != get_rcvhdrtail(rcd));
  7245. if (present)
  7246. return 1;
7247. /* fall back to a CSR read, correct independent of DMA_RTAIL */
  7248. tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
  7249. return rcd->head != tail;
  7250. }
  7251. /*
  7252. * Receive packet IRQ handler. This routine expects to be on its own IRQ.
  7253. * This routine will try to handle packets immediately (latency), but if
7254. * it finds too many, it will invoke the thread handler (bandwidth). The
  7255. * chip receive interrupt is *not* cleared down until this or the thread (if
  7256. * invoked) is finished. The intent is to avoid extra interrupts while we
  7257. * are processing packets anyway.
  7258. */
  7259. static irqreturn_t receive_context_interrupt(int irq, void *data)
  7260. {
  7261. struct hfi1_ctxtdata *rcd = data;
  7262. struct hfi1_devdata *dd = rcd->dd;
  7263. int disposition;
  7264. int present;
  7265. trace_hfi1_receive_interrupt(dd, rcd->ctxt);
  7266. this_cpu_inc(*dd->int_counter);
  7267. aspm_ctx_disable(rcd);
  7268. /* receive interrupt remains blocked while processing packets */
  7269. disposition = rcd->do_interrupt(rcd, 0);
  7270. /*
  7271. * Too many packets were seen while processing packets in this
  7272. * IRQ handler. Invoke the handler thread. The receive interrupt
  7273. * remains blocked.
  7274. */
  7275. if (disposition == RCV_PKT_LIMIT)
  7276. return IRQ_WAKE_THREAD;
  7277. /*
  7278. * The packet processor detected no more packets. Clear the receive
7279. * interrupt and recheck for a packet that may have arrived
  7280. * after the previous check and interrupt clear. If a packet arrived,
  7281. * force another interrupt.
  7282. */
  7283. clear_recv_intr(rcd);
  7284. present = check_packet_present(rcd);
  7285. if (present)
  7286. force_recv_intr(rcd);
  7287. return IRQ_HANDLED;
  7288. }
  7289. /*
  7290. * Receive packet thread handler. This expects to be invoked with the
  7291. * receive interrupt still blocked.
  7292. */
  7293. static irqreturn_t receive_context_thread(int irq, void *data)
  7294. {
  7295. struct hfi1_ctxtdata *rcd = data;
  7296. int present;
  7297. /* receive interrupt is still blocked from the IRQ handler */
  7298. (void)rcd->do_interrupt(rcd, 1);
  7299. /*
  7300. * The packet processor will only return if it detected no more
  7301. * packets. Hold IRQs here so we can safely clear the interrupt and
  7302. * recheck for a packet that may have arrived after the previous
  7303. * check and the interrupt clear. If a packet arrived, force another
  7304. * interrupt.
  7305. */
  7306. local_irq_disable();
  7307. clear_recv_intr(rcd);
  7308. present = check_packet_present(rcd);
  7309. if (present)
  7310. force_recv_intr(rcd);
  7311. local_irq_enable();
  7312. return IRQ_HANDLED;
  7313. }
  7314. /* ========================================================================= */
  7315. u32 read_physical_state(struct hfi1_devdata *dd)
  7316. {
  7317. u64 reg;
  7318. reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
  7319. return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
  7320. & DC_DC8051_STS_CUR_STATE_PORT_MASK;
  7321. }
  7322. u32 read_logical_state(struct hfi1_devdata *dd)
  7323. {
  7324. u64 reg;
  7325. reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
  7326. return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
  7327. & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
  7328. }
  7329. static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
  7330. {
  7331. u64 reg;
  7332. reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
  7333. /* clear current state, set new state */
  7334. reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
  7335. reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
  7336. write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
  7337. }
  7338. /*
7339. * Use the 8051 to read an LCB CSR.
  7340. */
  7341. static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
  7342. {
  7343. u32 regno;
  7344. int ret;
  7345. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  7346. if (acquire_lcb_access(dd, 0) == 0) {
  7347. *data = read_csr(dd, addr);
  7348. release_lcb_access(dd, 0);
  7349. return 0;
  7350. }
  7351. return -EBUSY;
  7352. }
  7353. /* register is an index of LCB registers: (offset - base) / 8 */
  7354. regno = (addr - DC_LCB_CFG_RUN) >> 3;
  7355. ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
  7356. if (ret != HCMD_SUCCESS)
  7357. return -EBUSY;
  7358. return 0;
  7359. }
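/*
 * Sketch of the LCB register index computation used here and in
 * write_lcb_via_8051() (illustrative helper, not used by the driver):
 * LCB CSRs are 8 bytes apart, so the 8051 command takes
 * (offset - DC_LCB_CFG_RUN) / 8 rather than the raw CSR address.
 */
static inline u32 lcb_addr_to_regno_sketch(u32 addr)
{
	return (addr - DC_LCB_CFG_RUN) >> 3;
}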
  7360. /*
  7361. * Provide a cache for some of the LCB registers in case the LCB is
  7362. * unavailable.
  7363. * (The LCB is unavailable in certain link states, for example.)
  7364. */
  7365. struct lcb_datum {
  7366. u32 off;
  7367. u64 val;
  7368. };
  7369. static struct lcb_datum lcb_cache[] = {
  7370. { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
  7371. { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
  7372. { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
  7373. };
  7374. static void update_lcb_cache(struct hfi1_devdata *dd)
  7375. {
  7376. int i;
  7377. int ret;
  7378. u64 val;
  7379. for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
  7380. ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
  7381. /* Update if we get good data */
  7382. if (likely(ret != -EBUSY))
  7383. lcb_cache[i].val = val;
  7384. }
  7385. }
  7386. static int read_lcb_cache(u32 off, u64 *val)
  7387. {
  7388. int i;
  7389. for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
  7390. if (lcb_cache[i].off == off) {
  7391. *val = lcb_cache[i].val;
  7392. return 0;
  7393. }
  7394. }
  7395. pr_warn("%s bad offset 0x%x\n", __func__, off);
  7396. return -1;
  7397. }
  7398. /*
  7399. * Read an LCB CSR. Access may not be in host control, so check.
  7400. * Return 0 on success, -EBUSY on failure.
  7401. */
  7402. int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
  7403. {
  7404. struct hfi1_pportdata *ppd = dd->pport;
  7405. /* if up, go through the 8051 for the value */
  7406. if (ppd->host_link_state & HLS_UP)
  7407. return read_lcb_via_8051(dd, addr, data);
  7408. /* if going up or down, check the cache, otherwise, no access */
  7409. if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
  7410. if (read_lcb_cache(addr, data))
  7411. return -EBUSY;
  7412. return 0;
  7413. }
  7414. /* otherwise, host has access */
  7415. *data = read_csr(dd, addr);
  7416. return 0;
  7417. }
  7418. /*
7419. * Use the 8051 to write an LCB CSR.
  7420. */
  7421. static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
  7422. {
  7423. u32 regno;
  7424. int ret;
  7425. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
  7426. (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
  7427. if (acquire_lcb_access(dd, 0) == 0) {
  7428. write_csr(dd, addr, data);
  7429. release_lcb_access(dd, 0);
  7430. return 0;
  7431. }
  7432. return -EBUSY;
  7433. }
  7434. /* register is an index of LCB registers: (offset - base) / 8 */
  7435. regno = (addr - DC_LCB_CFG_RUN) >> 3;
  7436. ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
  7437. if (ret != HCMD_SUCCESS)
  7438. return -EBUSY;
  7439. return 0;
  7440. }
  7441. /*
  7442. * Write an LCB CSR. Access may not be in host control, so check.
  7443. * Return 0 on success, -EBUSY on failure.
  7444. */
  7445. int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
  7446. {
  7447. struct hfi1_pportdata *ppd = dd->pport;
  7448. /* if up, go through the 8051 for the value */
  7449. if (ppd->host_link_state & HLS_UP)
  7450. return write_lcb_via_8051(dd, addr, data);
  7451. /* if going up or down, no access */
  7452. if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
  7453. return -EBUSY;
  7454. /* otherwise, host has access */
  7455. write_csr(dd, addr, data);
  7456. return 0;
  7457. }
  7458. /*
  7459. * Returns:
  7460. * < 0 = Linux error, not able to get access
  7461. * > 0 = 8051 command RETURN_CODE
  7462. */
  7463. static int do_8051_command(
  7464. struct hfi1_devdata *dd,
  7465. u32 type,
  7466. u64 in_data,
  7467. u64 *out_data)
  7468. {
  7469. u64 reg, completed;
  7470. int return_code;
  7471. unsigned long timeout;
  7472. hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
  7473. mutex_lock(&dd->dc8051_lock);
  7474. /* We can't send any commands to the 8051 if it's in reset */
  7475. if (dd->dc_shutdown) {
  7476. return_code = -ENODEV;
  7477. goto fail;
  7478. }
  7479. /*
  7480. * If an 8051 host command timed out previously, then the 8051 is
  7481. * stuck.
  7482. *
  7483. * On first timeout, attempt to reset and restart the entire DC
  7484. * block (including 8051). (Is this too big of a hammer?)
  7485. *
  7486. * If the 8051 times out a second time, the reset did not bring it
  7487. * back to healthy life. In that case, fail any subsequent commands.
  7488. */
  7489. if (dd->dc8051_timed_out) {
  7490. if (dd->dc8051_timed_out > 1) {
  7491. dd_dev_err(dd,
  7492. "Previous 8051 host command timed out, skipping command %u\n",
  7493. type);
  7494. return_code = -ENXIO;
  7495. goto fail;
  7496. }
  7497. _dc_shutdown(dd);
  7498. _dc_start(dd);
  7499. }
  7500. /*
  7501. * If there is no timeout, then the 8051 command interface is
  7502. * waiting for a command.
  7503. */
  7504. /*
7505. * When writing an LCB CSR, out_data contains the full value to
7506. * be written, while in_data contains the relative LCB
  7507. * address in 7:0. Do the work here, rather than the caller,
7508. * of distributing the write data to where it needs to go:
  7509. *
  7510. * Write data
  7511. * 39:00 -> in_data[47:8]
  7512. * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
  7513. * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
  7514. */
  7515. if (type == HCMD_WRITE_LCB_CSR) {
  7516. in_data |= ((*out_data) & 0xffffffffffull) << 8;
  7517. /* must preserve COMPLETED - it is tied to hardware */
  7518. reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
  7519. reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
  7520. reg |= ((((*out_data) >> 40) & 0xff) <<
  7521. DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
  7522. | ((((*out_data) >> 48) & 0xffff) <<
  7523. DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
  7524. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
  7525. }
  7526. /*
  7527. * Do two writes: the first to stabilize the type and req_data, the
  7528. * second to activate.
  7529. */
  7530. reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
  7531. << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
  7532. | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
  7533. << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
  7534. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
  7535. reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
  7536. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
  7537. /* wait for completion, alternate: interrupt */
  7538. timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
  7539. while (1) {
  7540. reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
  7541. completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
  7542. if (completed)
  7543. break;
  7544. if (time_after(jiffies, timeout)) {
  7545. dd->dc8051_timed_out++;
  7546. dd_dev_err(dd, "8051 host command %u timeout\n", type);
  7547. if (out_data)
  7548. *out_data = 0;
  7549. return_code = -ETIMEDOUT;
  7550. goto fail;
  7551. }
  7552. udelay(2);
  7553. }
  7554. if (out_data) {
  7555. *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
  7556. & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
  7557. if (type == HCMD_READ_LCB_CSR) {
  7558. /* top 16 bits are in a different register */
  7559. *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
  7560. & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
  7561. << (48
  7562. - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
  7563. }
  7564. }
  7565. return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
  7566. & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
  7567. dd->dc8051_timed_out = 0;
  7568. /*
  7569. * Clear command for next user.
  7570. */
  7571. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
  7572. fail:
  7573. mutex_unlock(&dd->dc8051_lock);
  7574. return return_code;
  7575. }
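/*
 * Sketch of how a 64-bit LCB CSR write value is split across the 8051
 * command registers, as described in the comment inside
 * do_8051_command() (illustrative helper, not used by the driver; names
 * are hypothetical).  Bits 39:0 ride in req_data[47:8] alongside the
 * register index in [7:0]; bits 47:40 land in EXT_DEV_0.RETURN_CODE and
 * bits 63:48 in EXT_DEV_0.RSP_DATA.
 */
static inline void split_lcb_write_sketch(u64 val, u32 regno, u64 *req_data,
					  u8 *ext_return_code,
					  u16 *ext_rsp_data)
{
	*req_data = ((val & 0xffffffffffull) << 8) | (regno & 0xff);
	*ext_return_code = (val >> 40) & 0xff;
	*ext_rsp_data = (val >> 48) & 0xffff;
}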
  7576. static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
  7577. {
  7578. return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
  7579. }
  7580. int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
  7581. u8 lane_id, u32 config_data)
  7582. {
  7583. u64 data;
  7584. int ret;
  7585. data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
  7586. | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
  7587. | (u64)config_data << LOAD_DATA_DATA_SHIFT;
  7588. ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
  7589. if (ret != HCMD_SUCCESS) {
  7590. dd_dev_err(dd,
  7591. "load 8051 config: field id %d, lane %d, err %d\n",
  7592. (int)field_id, (int)lane_id, ret);
  7593. }
  7594. return ret;
  7595. }
  7596. /*
  7597. * Read the 8051 firmware "registers". Use the RAM directly. Always
  7598. * set the result, even on error.
  7599. * Return 0 on success, -errno on failure
  7600. */
  7601. int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
  7602. u32 *result)
  7603. {
  7604. u64 big_data;
  7605. u32 addr;
  7606. int ret;
  7607. /* address start depends on the lane_id */
  7608. if (lane_id < 4)
  7609. addr = (4 * NUM_GENERAL_FIELDS)
  7610. + (lane_id * 4 * NUM_LANE_FIELDS);
  7611. else
  7612. addr = 0;
  7613. addr += field_id * 4;
  7614. /* read is in 8-byte chunks, hardware will truncate the address down */
  7615. ret = read_8051_data(dd, addr, 8, &big_data);
  7616. if (ret == 0) {
  7617. /* extract the 4 bytes we want */
  7618. if (addr & 0x4)
  7619. *result = (u32)(big_data >> 32);
  7620. else
  7621. *result = (u32)big_data;
  7622. } else {
  7623. *result = 0;
  7624. dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
  7625. __func__, lane_id, field_id);
  7626. }
  7627. return ret;
  7628. }
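/*
 * Sketch of the firmware "register" addressing used above (illustrative
 * helper, not used by the driver; the name is hypothetical).  General
 * fields (lane_id of GENERAL_CONFIG) start at address 0; per-lane fields
 * for lanes 0-3 follow the 4 * NUM_GENERAL_FIELDS general area.  Each
 * field is 4 bytes; the RAM read returns 8 bytes and bit 2 of the
 * address selects the upper or lower half.
 */
static inline u32 field_ram_addr_sketch(u8 field_id, u8 lane_id)
{
	u32 addr;

	if (lane_id < 4)		/* per-lane field */
		addr = (4 * NUM_GENERAL_FIELDS) +
		       (lane_id * 4 * NUM_LANE_FIELDS);
	else				/* general field */
		addr = 0;
	return addr + field_id * 4;
}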
  7629. static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
  7630. u8 continuous)
  7631. {
  7632. u32 frame;
  7633. frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
  7634. | power_management << POWER_MANAGEMENT_SHIFT;
  7635. return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
  7636. GENERAL_CONFIG, frame);
  7637. }
  7638. static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
  7639. u16 vl15buf, u8 crc_sizes)
  7640. {
  7641. u32 frame;
  7642. frame = (u32)vau << VAU_SHIFT
  7643. | (u32)z << Z_SHIFT
  7644. | (u32)vcu << VCU_SHIFT
  7645. | (u32)vl15buf << VL15BUF_SHIFT
  7646. | (u32)crc_sizes << CRC_SIZES_SHIFT;
  7647. return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
  7648. GENERAL_CONFIG, frame);
  7649. }
  7650. static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
  7651. u8 *flag_bits, u16 *link_widths)
  7652. {
  7653. u32 frame;
  7654. read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
  7655. &frame);
  7656. *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
  7657. *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
  7658. *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
  7659. }
  7660. static int write_vc_local_link_width(struct hfi1_devdata *dd,
  7661. u8 misc_bits,
  7662. u8 flag_bits,
  7663. u16 link_widths)
  7664. {
  7665. u32 frame;
  7666. frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
  7667. | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
  7668. | (u32)link_widths << LINK_WIDTH_SHIFT;
  7669. return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
  7670. frame);
  7671. }
  7672. static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
  7673. u8 device_rev)
  7674. {
  7675. u32 frame;
  7676. frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
  7677. | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
  7678. return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
  7679. }
  7680. static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
  7681. u8 *device_rev)
  7682. {
  7683. u32 frame;
  7684. read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
  7685. *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
  7686. *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
  7687. & REMOTE_DEVICE_REV_MASK;
  7688. }
  7689. void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
  7690. u8 *ver_patch)
  7691. {
  7692. u32 frame;
  7693. read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
  7694. *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
  7695. STS_FM_VERSION_MAJOR_MASK;
  7696. *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
  7697. STS_FM_VERSION_MINOR_MASK;
  7698. read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
  7699. *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
  7700. STS_FM_VERSION_PATCH_MASK;
  7701. }
  7702. static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
  7703. u8 *continuous)
  7704. {
  7705. u32 frame;
  7706. read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
  7707. *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
  7708. & POWER_MANAGEMENT_MASK;
  7709. *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
  7710. & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
  7711. }
  7712. static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
  7713. u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
  7714. {
  7715. u32 frame;
  7716. read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
  7717. *vau = (frame >> VAU_SHIFT) & VAU_MASK;
  7718. *z = (frame >> Z_SHIFT) & Z_MASK;
  7719. *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
  7720. *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
  7721. *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
  7722. }
  7723. static void read_vc_remote_link_width(struct hfi1_devdata *dd,
  7724. u8 *remote_tx_rate,
  7725. u16 *link_widths)
  7726. {
  7727. u32 frame;
  7728. read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
  7729. &frame);
  7730. *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
  7731. & REMOTE_TX_RATE_MASK;
  7732. *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
  7733. }
  7734. static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
  7735. {
  7736. u32 frame;
  7737. read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
  7738. *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
  7739. }
  7740. static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
  7741. {
  7742. u32 frame;
  7743. read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
  7744. *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
  7745. }
  7746. static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
  7747. {
  7748. read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
  7749. }
  7750. static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
  7751. {
  7752. read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
  7753. }
  7754. void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
  7755. {
  7756. u32 frame;
  7757. int ret;
  7758. *link_quality = 0;
  7759. if (dd->pport->host_link_state & HLS_UP) {
  7760. ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
  7761. &frame);
  7762. if (ret == 0)
  7763. *link_quality = (frame >> LINK_QUALITY_SHIFT)
  7764. & LINK_QUALITY_MASK;
  7765. }
  7766. }
  7767. static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
  7768. {
  7769. u32 frame;
  7770. read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
  7771. *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
  7772. }
  7773. static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
  7774. {
  7775. u32 frame;
  7776. read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
  7777. *ldr = (frame & 0xff);
  7778. }
  7779. static int read_tx_settings(struct hfi1_devdata *dd,
  7780. u8 *enable_lane_tx,
  7781. u8 *tx_polarity_inversion,
  7782. u8 *rx_polarity_inversion,
  7783. u8 *max_rate)
  7784. {
  7785. u32 frame;
  7786. int ret;
  7787. ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
  7788. *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
  7789. & ENABLE_LANE_TX_MASK;
  7790. *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
  7791. & TX_POLARITY_INVERSION_MASK;
  7792. *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
  7793. & RX_POLARITY_INVERSION_MASK;
  7794. *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
  7795. return ret;
  7796. }
  7797. static int write_tx_settings(struct hfi1_devdata *dd,
  7798. u8 enable_lane_tx,
  7799. u8 tx_polarity_inversion,
  7800. u8 rx_polarity_inversion,
  7801. u8 max_rate)
  7802. {
  7803. u32 frame;
  7804. /* no need to mask, all variable sizes match field widths */
  7805. frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
  7806. | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
  7807. | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
  7808. | max_rate << MAX_RATE_SHIFT;
  7809. return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
  7810. }
  7811. /*
  7812. * Read an idle LCB message.
  7813. *
  7814. * Returns 0 on success, -EINVAL on error
  7815. */
  7816. static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
  7817. {
  7818. int ret;
  7819. ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
  7820. if (ret != HCMD_SUCCESS) {
  7821. dd_dev_err(dd, "read idle message: type %d, err %d\n",
  7822. (u32)type, ret);
  7823. return -EINVAL;
  7824. }
  7825. dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
  7826. /* return only the payload as we already know the type */
  7827. *data_out >>= IDLE_PAYLOAD_SHIFT;
  7828. return 0;
  7829. }
  7830. /*
  7831. * Read an idle SMA message. To be done in response to a notification from
  7832. * the 8051.
  7833. *
  7834. * Returns 0 on success, -EINVAL on error
  7835. */
  7836. static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
  7837. {
  7838. return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
  7839. data);
  7840. }
  7841. /*
  7842. * Send an idle LCB message.
  7843. *
  7844. * Returns 0 on success, -EINVAL on error
  7845. */
  7846. static int send_idle_message(struct hfi1_devdata *dd, u64 data)
  7847. {
  7848. int ret;
  7849. dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
  7850. ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
  7851. if (ret != HCMD_SUCCESS) {
  7852. dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
  7853. data, ret);
  7854. return -EINVAL;
  7855. }
  7856. return 0;
  7857. }
  7858. /*
  7859. * Send an idle SMA message.
  7860. *
  7861. * Returns 0 on success, -EINVAL on error
  7862. */
  7863. int send_idle_sma(struct hfi1_devdata *dd, u64 message)
  7864. {
  7865. u64 data;
  7866. data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
  7867. ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
  7868. return send_idle_message(dd, data);
  7869. }
  7870. /*
  7871. * Initialize the LCB then do a quick link up. This may or may not be
  7872. * in loopback.
  7873. *
  7874. * return 0 on success, -errno on error
  7875. */
  7876. static int do_quick_linkup(struct hfi1_devdata *dd)
  7877. {
  7878. int ret;
  7879. lcb_shutdown(dd, 0);
  7880. if (loopback) {
  7881. /* LCB_CFG_LOOPBACK.VAL = 2 */
  7882. /* LCB_CFG_LANE_WIDTH.VAL = 0 */
  7883. write_csr(dd, DC_LCB_CFG_LOOPBACK,
  7884. IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
  7885. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
  7886. }
  7887. /* start the LCBs */
  7888. /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
  7889. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  7890. /* simulator only loopback steps */
  7891. if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  7892. /* LCB_CFG_RUN.EN = 1 */
  7893. write_csr(dd, DC_LCB_CFG_RUN,
  7894. 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
  7895. ret = wait_link_transfer_active(dd, 10);
  7896. if (ret)
  7897. return ret;
  7898. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
  7899. 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
  7900. }
  7901. if (!loopback) {
  7902. /*
  7903. * When doing quick linkup and not in loopback, both
  7904. * sides must be done with LCB set-up before either
  7905. * starts the quick linkup. Put a delay here so that
  7906. * both sides can be started and have a chance to be
  7907. * done with LCB set up before resuming.
  7908. */
  7909. dd_dev_err(dd,
  7910. "Pausing for peer to be finished with LCB set up\n");
  7911. msleep(5000);
  7912. dd_dev_err(dd, "Continuing with quick linkup\n");
  7913. }
  7914. write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
  7915. set_8051_lcb_access(dd);
  7916. /*
  7917. * State "quick" LinkUp request sets the physical link state to
  7918. * LinkUp without a verify capability sequence.
  7919. * This state is in simulator v37 and later.
  7920. */
  7921. ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
  7922. if (ret != HCMD_SUCCESS) {
  7923. dd_dev_err(dd,
  7924. "%s: set physical link state to quick LinkUp failed with return %d\n",
  7925. __func__, ret);
  7926. set_host_lcb_access(dd);
  7927. write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
  7928. if (ret >= 0)
  7929. ret = -EINVAL;
  7930. return ret;
  7931. }
  7932. return 0; /* success */
  7933. }
  7934. /*
  7935. * Set the SerDes to internal loopback mode.
  7936. * Returns 0 on success, -errno on error.
  7937. */
  7938. static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
  7939. {
  7940. int ret;
  7941. ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
  7942. if (ret == HCMD_SUCCESS)
  7943. return 0;
  7944. dd_dev_err(dd,
  7945. "Set physical link state to SerDes Loopback failed with return %d\n",
  7946. ret);
  7947. if (ret >= 0)
  7948. ret = -EINVAL;
  7949. return ret;
  7950. }
  7951. /*
  7952. * Do all special steps to set up loopback.
  7953. */
  7954. static int init_loopback(struct hfi1_devdata *dd)
  7955. {
  7956. dd_dev_info(dd, "Entering loopback mode\n");
  7957. /* all loopbacks should disable self GUID check */
  7958. write_csr(dd, DC_DC8051_CFG_MODE,
  7959. (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
  7960. /*
  7961. * The simulator has only one loopback option - LCB. Switch
  7962. * to that option, which includes quick link up.
  7963. *
  7964. * Accept all valid loopback values.
  7965. */
  7966. if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
  7967. (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
  7968. loopback == LOOPBACK_CABLE)) {
  7969. loopback = LOOPBACK_LCB;
  7970. quick_linkup = 1;
  7971. return 0;
  7972. }
  7973. /* handle serdes loopback */
  7974. if (loopback == LOOPBACK_SERDES) {
7975. /* internal serdes loopback needs quick linkup on RTL */
  7976. if (dd->icode == ICODE_RTL_SILICON)
  7977. quick_linkup = 1;
  7978. return set_serdes_loopback_mode(dd);
  7979. }
  7980. /* LCB loopback - handled at poll time */
  7981. if (loopback == LOOPBACK_LCB) {
  7982. quick_linkup = 1; /* LCB is always quick linkup */
  7983. /* not supported in emulation due to emulation RTL changes */
  7984. if (dd->icode == ICODE_FPGA_EMULATION) {
  7985. dd_dev_err(dd,
  7986. "LCB loopback not supported in emulation\n");
  7987. return -EINVAL;
  7988. }
  7989. return 0;
  7990. }
  7991. /* external cable loopback requires no extra steps */
  7992. if (loopback == LOOPBACK_CABLE)
  7993. return 0;
  7994. dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
  7995. return -EINVAL;
  7996. }
  7997. /*
  7998. * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
  7999. * used in the Verify Capability link width attribute.
  8000. */
  8001. static u16 opa_to_vc_link_widths(u16 opa_widths)
  8002. {
  8003. int i;
  8004. u16 result = 0;
  8005. static const struct link_bits {
  8006. u16 from;
  8007. u16 to;
  8008. } opa_link_xlate[] = {
  8009. { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
  8010. { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
  8011. { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
  8012. { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
  8013. };
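/*
 * Example: an OPA width mask of (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X)
 * translates to 0x1 | 0x8 = 0x9 in the Verify Capability encoding.
 */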
  8014. for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
  8015. if (opa_widths & opa_link_xlate[i].from)
  8016. result |= opa_link_xlate[i].to;
  8017. }
  8018. return result;
  8019. }
  8020. /*
  8021. * Set link attributes before moving to polling.
  8022. */
  8023. static int set_local_link_attributes(struct hfi1_pportdata *ppd)
  8024. {
  8025. struct hfi1_devdata *dd = ppd->dd;
  8026. u8 enable_lane_tx;
  8027. u8 tx_polarity_inversion;
  8028. u8 rx_polarity_inversion;
  8029. int ret;
  8030. /* reset our fabric serdes to clear any lingering problems */
  8031. fabric_serdes_reset(dd);
  8032. /* set the local tx rate - need to read-modify-write */
  8033. ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
  8034. &rx_polarity_inversion, &ppd->local_tx_rate);
  8035. if (ret)
  8036. goto set_local_link_attributes_fail;
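/*
 * Note the tx rate encoding difference below: 8051 firmware older than
 * 0.20 takes a single rate select (1 when 25G is enabled, 0 otherwise),
 * while newer firmware takes a rate bitmask (bit 1 = 25G, bit 0 = 12.5G).
 */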
  8037. if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
  8038. /* set the tx rate to the fastest enabled */
  8039. if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
  8040. ppd->local_tx_rate = 1;
  8041. else
  8042. ppd->local_tx_rate = 0;
  8043. } else {
  8044. /* set the tx rate to all enabled */
  8045. ppd->local_tx_rate = 0;
  8046. if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
  8047. ppd->local_tx_rate |= 2;
  8048. if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
  8049. ppd->local_tx_rate |= 1;
  8050. }
  8051. enable_lane_tx = 0xF; /* enable all four lanes */
  8052. ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
  8053. rx_polarity_inversion, ppd->local_tx_rate);
  8054. if (ret != HCMD_SUCCESS)
  8055. goto set_local_link_attributes_fail;
  8056. /*
  8057. * DC supports continuous updates.
  8058. */
  8059. ret = write_vc_local_phy(dd,
  8060. 0 /* no power management */,
  8061. 1 /* continuous updates */);
  8062. if (ret != HCMD_SUCCESS)
  8063. goto set_local_link_attributes_fail;
  8064. /* z=1 in the next call: AU of 0 is not supported by the hardware */
  8065. ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
  8066. ppd->port_crc_mode_enabled);
  8067. if (ret != HCMD_SUCCESS)
  8068. goto set_local_link_attributes_fail;
  8069. ret = write_vc_local_link_width(dd, 0, 0,
  8070. opa_to_vc_link_widths(
  8071. ppd->link_width_enabled));
  8072. if (ret != HCMD_SUCCESS)
  8073. goto set_local_link_attributes_fail;
  8074. /* let peer know who we are */
  8075. ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
  8076. if (ret == HCMD_SUCCESS)
  8077. return 0;
  8078. set_local_link_attributes_fail:
  8079. dd_dev_err(dd,
  8080. "Failed to set local link attributes, return 0x%x\n",
  8081. ret);
  8082. return ret;
  8083. }
  8084. /*
  8085. * Call this to start the link.
  8086. * Do not do anything if the link is disabled.
  8087. * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
  8088. */
  8089. int start_link(struct hfi1_pportdata *ppd)
  8090. {
  8091. /*
  8092. * Tune the SerDes to a ballpark setting for optimal signal and bit
  8093. * error rate. Needs to be done before starting the link.
  8094. */
  8095. tune_serdes(ppd);
  8096. if (!ppd->link_enabled) {
  8097. dd_dev_info(ppd->dd,
  8098. "%s: stopping link start because link is disabled\n",
  8099. __func__);
  8100. return 0;
  8101. }
  8102. if (!ppd->driver_link_ready) {
  8103. dd_dev_info(ppd->dd,
  8104. "%s: stopping link start because driver is not ready\n",
  8105. __func__);
  8106. return 0;
  8107. }
  8108. /*
  8109. * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
  8110. * pkey table can be configured properly if the HFI unit is connected
8111. * to a switch port with MgmtAllowed=NO
  8112. */
  8113. clear_full_mgmt_pkey(ppd);
  8114. return set_link_state(ppd, HLS_DN_POLL);
  8115. }
  8116. static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
  8117. {
  8118. struct hfi1_devdata *dd = ppd->dd;
  8119. u64 mask;
  8120. unsigned long timeout;
  8121. /*
  8122. * Some QSFP cables have a quirk that asserts the IntN line as a side
  8123. * effect of power up on plug-in. We ignore this false positive
  8124. * interrupt until the module has finished powering up by waiting for
  8125. * a minimum timeout of the module inrush initialization time of
  8126. * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
  8127. * module have stabilized.
  8128. */
  8129. msleep(500);
  8130. /*
  8131. * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
  8132. */
  8133. timeout = jiffies + msecs_to_jiffies(2000);
  8134. while (1) {
  8135. mask = read_csr(dd, dd->hfi1_id ?
  8136. ASIC_QSFP2_IN : ASIC_QSFP1_IN);
  8137. if (!(mask & QSFP_HFI0_INT_N))
  8138. break;
  8139. if (time_after(jiffies, timeout)) {
  8140. dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
  8141. __func__);
  8142. break;
  8143. }
  8144. udelay(2);
  8145. }
  8146. }
  8147. static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
  8148. {
  8149. struct hfi1_devdata *dd = ppd->dd;
  8150. u64 mask;
  8151. mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
  8152. if (enable) {
  8153. /*
  8154. * Clear the status register to avoid an immediate interrupt
  8155. * when we re-enable the IntN pin
  8156. */
  8157. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
  8158. QSFP_HFI0_INT_N);
  8159. mask |= (u64)QSFP_HFI0_INT_N;
  8160. } else {
  8161. mask &= ~(u64)QSFP_HFI0_INT_N;
  8162. }
  8163. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
  8164. }
  8165. void reset_qsfp(struct hfi1_pportdata *ppd)
  8166. {
  8167. struct hfi1_devdata *dd = ppd->dd;
  8168. u64 mask, qsfp_mask;
  8169. /* Disable INT_N from triggering QSFP interrupts */
  8170. set_qsfp_int_n(ppd, 0);
  8171. /* Reset the QSFP */
  8172. mask = (u64)QSFP_HFI0_RESET_N;
  8173. qsfp_mask = read_csr(dd,
  8174. dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
  8175. qsfp_mask &= ~mask;
  8176. write_csr(dd,
  8177. dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
  8178. udelay(10);
  8179. qsfp_mask |= mask;
  8180. write_csr(dd,
  8181. dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
  8182. wait_for_qsfp_init(ppd);
  8183. /*
  8184. * Allow INT_N to trigger the QSFP interrupt to watch
  8185. * for alarms and warnings
  8186. */
  8187. set_qsfp_int_n(ppd, 1);
  8188. }
  8189. static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
  8190. u8 *qsfp_interrupt_status)
  8191. {
  8192. struct hfi1_devdata *dd = ppd->dd;
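/*
 * The caller (qsfp_event()) reads 16 status bytes starting at QSFP
 * byte 6, so qsfp_interrupt_status[N] below is QSFP memory map byte
 * N + 6: temperature, Vcc, RX power, TX bias, and TX power flags.
 */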
  8193. if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
  8194. (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
  8195. dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
  8196. __func__);
  8197. if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
  8198. (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
  8199. dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
  8200. __func__);
  8201. /*
  8202. * The remaining alarms/warnings don't matter if the link is down.
  8203. */
  8204. if (ppd->host_link_state & HLS_DOWN)
  8205. return 0;
  8206. if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
  8207. (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
  8208. dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
  8209. __func__);
  8210. if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
  8211. (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
  8212. dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
  8213. __func__);
  8214. /* Byte 2 is vendor specific */
  8215. if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
  8216. (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
  8217. dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
  8218. __func__);
  8219. if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
  8220. (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
  8221. dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
  8222. __func__);
  8223. if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
  8224. (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
  8225. dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
  8226. __func__);
  8227. if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
  8228. (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
  8229. dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
  8230. __func__);
  8231. if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
  8232. (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
  8233. dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
  8234. __func__);
  8235. if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
  8236. (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
  8237. dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
  8238. __func__);
  8239. if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
  8240. (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
  8241. dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
  8242. __func__);
  8243. if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
  8244. (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
  8245. dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
  8246. __func__);
  8247. if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
  8248. (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
  8249. dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
  8250. __func__);
  8251. if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
  8252. (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
  8253. dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
  8254. __func__);
  8255. if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
  8256. (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
  8257. dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
  8258. __func__);
  8259. if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
  8260. (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
  8261. dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
  8262. __func__);
  8263. /* Bytes 9-10 and 11-12 are reserved */
  8264. /* Bytes 13-15 are vendor specific */
  8265. return 0;
  8266. }
  8267. /* This routine will only be scheduled if the QSFP module present is asserted */
  8268. void qsfp_event(struct work_struct *work)
  8269. {
  8270. struct qsfp_data *qd;
  8271. struct hfi1_pportdata *ppd;
  8272. struct hfi1_devdata *dd;
  8273. qd = container_of(work, struct qsfp_data, qsfp_work);
  8274. ppd = qd->ppd;
  8275. dd = ppd->dd;
  8276. /* Sanity check */
  8277. if (!qsfp_mod_present(ppd))
  8278. return;
  8279. /*
  8280. * Turn DC back on after cable has been re-inserted. Up until
  8281. * now, the DC has been in reset to save power.
  8282. */
  8283. dc_start(dd);
  8284. if (qd->cache_refresh_required) {
  8285. set_qsfp_int_n(ppd, 0);
  8286. wait_for_qsfp_init(ppd);
  8287. /*
  8288. * Allow INT_N to trigger the QSFP interrupt to watch
  8289. * for alarms and warnings
  8290. */
  8291. set_qsfp_int_n(ppd, 1);
  8292. start_link(ppd);
  8293. }
  8294. if (qd->check_interrupt_flags) {
  8295. u8 qsfp_interrupt_status[16] = {0,};
  8296. if (one_qsfp_read(ppd, dd->hfi1_id, 6,
  8297. &qsfp_interrupt_status[0], 16) != 16) {
  8298. dd_dev_info(dd,
  8299. "%s: Failed to read status of QSFP module\n",
  8300. __func__);
  8301. } else {
  8302. unsigned long flags;
  8303. handle_qsfp_error_conditions(
  8304. ppd, qsfp_interrupt_status);
  8305. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  8306. ppd->qsfp_info.check_interrupt_flags = 0;
  8307. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  8308. flags);
  8309. }
  8310. }
  8311. }
  8312. static void init_qsfp_int(struct hfi1_devdata *dd)
  8313. {
  8314. struct hfi1_pportdata *ppd = dd->pport;
  8315. u64 qsfp_mask, cce_int_mask;
  8316. const int qsfp1_int_smask = QSFP1_INT % 64;
  8317. const int qsfp2_int_smask = QSFP2_INT % 64;
  8318. /*
  8319. * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
  8320. * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
  8321. * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
  8322. * the index of the appropriate CSR in the CCEIntMask CSR array
  8323. */
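/*
 * CCE_INT_MASK is an array of 64-bit CSRs spaced 8 bytes apart:
 * (QSFP1_INT / 64) selects the CSR and the % 64 values above give
 * the bit position within it.
 */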
  8324. cce_int_mask = read_csr(dd, CCE_INT_MASK +
  8325. (8 * (QSFP1_INT / 64)));
  8326. if (dd->hfi1_id) {
  8327. cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
  8328. write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
  8329. cce_int_mask);
  8330. } else {
  8331. cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
  8332. write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
  8333. cce_int_mask);
  8334. }
  8335. qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
  8336. /* Clear current status to avoid spurious interrupts */
  8337. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
  8338. qsfp_mask);
  8339. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
  8340. qsfp_mask);
  8341. set_qsfp_int_n(ppd, 0);
  8342. /* Handle active low nature of INT_N and MODPRST_N pins */
  8343. if (qsfp_mod_present(ppd))
  8344. qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
  8345. write_csr(dd,
  8346. dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
  8347. qsfp_mask);
  8348. }
  8349. /*
  8350. * Do a one-time initialize of the LCB block.
  8351. */
  8352. static void init_lcb(struct hfi1_devdata *dd)
  8353. {
  8354. /* simulator does not correctly handle LCB cclk loopback, skip */
  8355. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  8356. return;
  8357. /* the DC has been reset earlier in the driver load */
  8358. /* set LCB for cclk loopback on the port */
  8359. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
  8360. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
  8361. write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
  8362. write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
  8363. write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
  8364. write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
  8365. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
  8366. }
  8367. /*
  8368. * Perform a test read on the QSFP. Return 0 on success, -ERRNO
  8369. * on error.
  8370. */
  8371. static int test_qsfp_read(struct hfi1_pportdata *ppd)
  8372. {
  8373. int ret;
  8374. u8 status;
  8375. /*
8376. * Report success if this is not a QSFP port, or if it is a QSFP
8377. * port but the cable is not present
  8378. */
  8379. if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
  8380. return 0;
  8381. /* read byte 2, the status byte */
  8382. ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
  8383. if (ret < 0)
  8384. return ret;
  8385. if (ret != 1)
  8386. return -EIO;
  8387. return 0; /* success */
  8388. }
  8389. /*
  8390. * Values for QSFP retry.
  8391. *
  8392. * Give up after 10s (20 x 500ms). The overall timeout was empirically
  8393. * arrived at from experience on a large cluster.
  8394. */
  8395. #define MAX_QSFP_RETRIES 20
  8396. #define QSFP_RETRY_WAIT 500 /* msec */
  8397. /*
  8398. * Try a QSFP read. If it fails, schedule a retry for later.
  8399. * Called on first link activation after driver load.
  8400. */
  8401. static void try_start_link(struct hfi1_pportdata *ppd)
  8402. {
  8403. if (test_qsfp_read(ppd)) {
  8404. /* read failed */
  8405. if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
  8406. dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
  8407. return;
  8408. }
  8409. dd_dev_info(ppd->dd,
  8410. "QSFP not responding, waiting and retrying %d\n",
  8411. (int)ppd->qsfp_retry_count);
  8412. ppd->qsfp_retry_count++;
  8413. queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
  8414. msecs_to_jiffies(QSFP_RETRY_WAIT));
  8415. return;
  8416. }
  8417. ppd->qsfp_retry_count = 0;
  8418. start_link(ppd);
  8419. }
  8420. /*
  8421. * Workqueue function to start the link after a delay.
  8422. */
  8423. void handle_start_link(struct work_struct *work)
  8424. {
  8425. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  8426. start_link_work.work);
  8427. try_start_link(ppd);
  8428. }
  8429. int bringup_serdes(struct hfi1_pportdata *ppd)
  8430. {
  8431. struct hfi1_devdata *dd = ppd->dd;
  8432. u64 guid;
  8433. int ret;
  8434. if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
  8435. add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
  8436. guid = ppd->guids[HFI1_PORT_GUID_INDEX];
  8437. if (!guid) {
  8438. if (dd->base_guid)
  8439. guid = dd->base_guid + ppd->port - 1;
  8440. ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
  8441. }
  8442. /* Set linkinit_reason on power up per OPA spec */
  8443. ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
  8444. /* one-time init of the LCB */
  8445. init_lcb(dd);
  8446. if (loopback) {
  8447. ret = init_loopback(dd);
  8448. if (ret < 0)
  8449. return ret;
  8450. }
  8451. get_port_type(ppd);
  8452. if (ppd->port_type == PORT_TYPE_QSFP) {
  8453. set_qsfp_int_n(ppd, 0);
  8454. wait_for_qsfp_init(ppd);
  8455. set_qsfp_int_n(ppd, 1);
  8456. }
  8457. try_start_link(ppd);
  8458. return 0;
  8459. }
  8460. void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
  8461. {
  8462. struct hfi1_devdata *dd = ppd->dd;
  8463. /*
8464. * Shut down the link and keep it down. First clear the flag that says
8465. * the driver wants to allow the link to be up (driver_link_ready).
  8466. * Then make sure the link is not automatically restarted
  8467. * (link_enabled). Cancel any pending restart. And finally
  8468. * go offline.
  8469. */
  8470. ppd->driver_link_ready = 0;
  8471. ppd->link_enabled = 0;
  8472. ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
  8473. flush_delayed_work(&ppd->start_link_work);
  8474. cancel_delayed_work_sync(&ppd->start_link_work);
  8475. ppd->offline_disabled_reason =
  8476. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
  8477. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
  8478. OPA_LINKDOWN_REASON_SMA_DISABLED);
  8479. set_link_state(ppd, HLS_DN_OFFLINE);
  8480. /* disable the port */
  8481. clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  8482. }
  8483. static inline int init_cpu_counters(struct hfi1_devdata *dd)
  8484. {
  8485. struct hfi1_pportdata *ppd;
  8486. int i;
  8487. ppd = (struct hfi1_pportdata *)(dd + 1);
  8488. for (i = 0; i < dd->num_pports; i++, ppd++) {
  8489. ppd->ibport_data.rvp.rc_acks = NULL;
  8490. ppd->ibport_data.rvp.rc_qacks = NULL;
  8491. ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
  8492. ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
  8493. ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
  8494. if (!ppd->ibport_data.rvp.rc_acks ||
  8495. !ppd->ibport_data.rvp.rc_delayed_comp ||
  8496. !ppd->ibport_data.rvp.rc_qacks)
  8497. return -ENOMEM;
  8498. }
  8499. return 0;
  8500. }
  8501. static const char * const pt_names[] = {
  8502. "expected",
  8503. "eager",
  8504. "invalid"
  8505. };
  8506. static const char *pt_name(u32 type)
  8507. {
  8508. return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
  8509. }
  8510. /*
  8511. * index is the index into the receive array
  8512. */
  8513. void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
  8514. u32 type, unsigned long pa, u16 order)
  8515. {
  8516. u64 reg;
  8517. void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
  8518. (dd->kregbase + RCV_ARRAY));
  8519. if (!(dd->flags & HFI1_PRESENT))
  8520. goto done;
  8521. if (type == PT_INVALID) {
  8522. pa = 0;
  8523. } else if (type > PT_INVALID) {
  8524. dd_dev_err(dd,
  8525. "unexpected receive array type %u for index %u, not handled\n",
  8526. type, index);
  8527. goto done;
  8528. }
  8529. hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
  8530. pt_name(type), index, pa, (unsigned long)order);
  8531. #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
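/*
 * Each RcvArray entry packs a write-enable bit, the buffer size
 * (order), and the 4KB-aligned physical address (pa >> RT_ADDR_SHIFT).
 */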
  8532. reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
  8533. | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
  8534. | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
  8535. << RCV_ARRAY_RT_ADDR_SHIFT;
  8536. writeq(reg, base + (index * 8));
  8537. if (type == PT_EAGER)
  8538. /*
  8539. * Eager entries are written one-by-one so we have to push them
  8540. * after we write the entry.
  8541. */
  8542. flush_wc();
  8543. done:
  8544. return;
  8545. }
  8546. void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
  8547. {
  8548. struct hfi1_devdata *dd = rcd->dd;
  8549. u32 i;
  8550. /* this could be optimized */
  8551. for (i = rcd->eager_base; i < rcd->eager_base +
  8552. rcd->egrbufs.alloced; i++)
  8553. hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
  8554. for (i = rcd->expected_base;
  8555. i < rcd->expected_base + rcd->expected_count; i++)
  8556. hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
  8557. }
  8558. struct ib_header *hfi1_get_msgheader(
  8559. struct hfi1_devdata *dd, __le32 *rhf_addr)
  8560. {
  8561. u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
  8562. return (struct ib_header *)
  8563. (rhf_addr - dd->rhf_offset + offset);
  8564. }
  8565. static const char * const ib_cfg_name_strings[] = {
  8566. "HFI1_IB_CFG_LIDLMC",
  8567. "HFI1_IB_CFG_LWID_DG_ENB",
  8568. "HFI1_IB_CFG_LWID_ENB",
  8569. "HFI1_IB_CFG_LWID",
  8570. "HFI1_IB_CFG_SPD_ENB",
  8571. "HFI1_IB_CFG_SPD",
  8572. "HFI1_IB_CFG_RXPOL_ENB",
  8573. "HFI1_IB_CFG_LREV_ENB",
  8574. "HFI1_IB_CFG_LINKLATENCY",
  8575. "HFI1_IB_CFG_HRTBT",
  8576. "HFI1_IB_CFG_OP_VLS",
  8577. "HFI1_IB_CFG_VL_HIGH_CAP",
  8578. "HFI1_IB_CFG_VL_LOW_CAP",
  8579. "HFI1_IB_CFG_OVERRUN_THRESH",
  8580. "HFI1_IB_CFG_PHYERR_THRESH",
  8581. "HFI1_IB_CFG_LINKDEFAULT",
  8582. "HFI1_IB_CFG_PKEYS",
  8583. "HFI1_IB_CFG_MTU",
  8584. "HFI1_IB_CFG_LSTATE",
  8585. "HFI1_IB_CFG_VL_HIGH_LIMIT",
  8586. "HFI1_IB_CFG_PMA_TICKS",
  8587. "HFI1_IB_CFG_PORT"
  8588. };
  8589. static const char *ib_cfg_name(int which)
  8590. {
  8591. if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
  8592. return "invalid";
  8593. return ib_cfg_name_strings[which];
  8594. }
  8595. int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
  8596. {
  8597. struct hfi1_devdata *dd = ppd->dd;
  8598. int val = 0;
  8599. switch (which) {
  8600. case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
  8601. val = ppd->link_width_enabled;
  8602. break;
  8603. case HFI1_IB_CFG_LWID: /* currently active Link-width */
  8604. val = ppd->link_width_active;
  8605. break;
  8606. case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
  8607. val = ppd->link_speed_enabled;
  8608. break;
  8609. case HFI1_IB_CFG_SPD: /* current Link speed */
  8610. val = ppd->link_speed_active;
  8611. break;
  8612. case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
  8613. case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
  8614. case HFI1_IB_CFG_LINKLATENCY:
  8615. goto unimplemented;
  8616. case HFI1_IB_CFG_OP_VLS:
  8617. val = ppd->vls_operational;
  8618. break;
  8619. case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
  8620. val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
  8621. break;
  8622. case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
  8623. val = VL_ARB_LOW_PRIO_TABLE_SIZE;
  8624. break;
  8625. case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  8626. val = ppd->overrun_threshold;
  8627. break;
  8628. case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  8629. val = ppd->phy_error_threshold;
  8630. break;
  8631. case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  8632. val = dd->link_default;
  8633. break;
  8634. case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
  8635. case HFI1_IB_CFG_PMA_TICKS:
  8636. default:
  8637. unimplemented:
  8638. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  8639. dd_dev_info(
  8640. dd,
  8641. "%s: which %s: not implemented\n",
  8642. __func__,
  8643. ib_cfg_name(which));
  8644. break;
  8645. }
  8646. return val;
  8647. }
  8648. /*
  8649. * The largest MAD packet size.
  8650. */
  8651. #define MAX_MAD_PACKET 2048
  8652. /*
  8653. * Return the maximum header bytes that can go on the _wire_
  8654. * for this device. This count includes the ICRC which is
  8655. * not part of the packet held in memory but it is appended
  8656. * by the HW.
  8657. * This is dependent on the device's receive header entry size.
  8658. * HFI allows this to be set per-receive context, but the
  8659. * driver presently enforces a global value.
  8660. */
  8661. u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
  8662. {
  8663. /*
  8664. * The maximum non-payload (MTU) bytes in LRH.PktLen are
  8665. * the Receive Header Entry Size minus the PBC (or RHF) size
  8666. * plus one DW for the ICRC appended by HW.
  8667. *
  8668. * dd->rcd[0].rcvhdrqentsize is in DW.
8669. * We use rcd[0] as all contexts will have the same value. Also,
  8670. * the first kernel context would have been allocated by now so
  8671. * we are guaranteed a valid value.
  8672. */
  8673. return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
  8674. }
  8675. /*
  8676. * Set Send Length
  8677. * @ppd - per port data
  8678. *
  8679. * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
  8680. * registers compare against LRH.PktLen, so use the max bytes included
  8681. * in the LRH.
  8682. *
  8683. * This routine changes all VL values except VL15, which it maintains at
  8684. * the same value.
  8685. */
  8686. static void set_send_length(struct hfi1_pportdata *ppd)
  8687. {
  8688. struct hfi1_devdata *dd = ppd->dd;
  8689. u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
  8690. u32 maxvlmtu = dd->vld[15].mtu;
  8691. u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
  8692. & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
  8693. SEND_LEN_CHECK1_LEN_VL15_SHIFT;
  8694. int i, j;
  8695. u32 thres;
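/*
 * Each SendLenCheck CSR holds limits for four VLs: VL0-VL3 go into
 * SEND_LEN_CHECK0 and VL4-VL7 (plus VL15, set in len2 above) into
 * SEND_LEN_CHECK1, hence the i <= 3 split and (i % 4) shift below.
 */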
  8696. for (i = 0; i < ppd->vls_supported; i++) {
  8697. if (dd->vld[i].mtu > maxvlmtu)
  8698. maxvlmtu = dd->vld[i].mtu;
  8699. if (i <= 3)
  8700. len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
  8701. & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
  8702. ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
  8703. else
  8704. len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
  8705. & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
  8706. ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
  8707. }
  8708. write_csr(dd, SEND_LEN_CHECK0, len1);
  8709. write_csr(dd, SEND_LEN_CHECK1, len2);
  8710. /* adjust kernel credit return thresholds based on new MTUs */
  8711. /* all kernel receive contexts have the same hdrqentsize */
  8712. for (i = 0; i < ppd->vls_supported; i++) {
  8713. thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
  8714. sc_mtu_to_threshold(dd->vld[i].sc,
  8715. dd->vld[i].mtu,
  8716. dd->rcd[0]->rcvhdrqentsize));
  8717. for (j = 0; j < INIT_SC_PER_VL; j++)
  8718. sc_set_cr_threshold(
  8719. pio_select_send_context_vl(dd, j, i),
  8720. thres);
  8721. }
  8722. thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
  8723. sc_mtu_to_threshold(dd->vld[15].sc,
  8724. dd->vld[15].mtu,
  8725. dd->rcd[0]->rcvhdrqentsize));
  8726. sc_set_cr_threshold(dd->vld[15].sc, thres);
  8727. /* Adjust maximum MTU for the port in DC */
  8728. dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
  8729. (ilog2(maxvlmtu >> 8) + 1);
  8730. len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
  8731. len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
  8732. len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
  8733. DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
  8734. write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
  8735. }
  8736. static void set_lidlmc(struct hfi1_pportdata *ppd)
  8737. {
  8738. int i;
  8739. u64 sreg = 0;
  8740. struct hfi1_devdata *dd = ppd->dd;
  8741. u32 mask = ~((1U << ppd->lmc) - 1);
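/*
 * e.g. lmc = 2 gives mask 0xfffffffc: the low two LID bits are per-path
 * bits and are masked out of the DLID/SLID checks below.
 */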
  8742. u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
  8743. c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
  8744. | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
  8745. c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
  8746. << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
  8747. ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
  8748. << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
  8749. write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
  8750. /*
  8751. * Iterate over all the send contexts and set their SLID check
  8752. */
  8753. sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
  8754. SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
  8755. (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
  8756. SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
  8757. for (i = 0; i < dd->chip_send_contexts; i++) {
  8758. hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
  8759. i, (u32)sreg);
  8760. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
  8761. }
  8762. /* Now we have to do the same thing for the sdma engines */
  8763. sdma_update_lmc(dd, mask, ppd->lid);
  8764. }
  8765. static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
  8766. {
  8767. unsigned long timeout;
  8768. u32 curr_state;
  8769. timeout = jiffies + msecs_to_jiffies(msecs);
  8770. while (1) {
  8771. curr_state = read_physical_state(dd);
  8772. if (curr_state == state)
  8773. break;
  8774. if (time_after(jiffies, timeout)) {
  8775. dd_dev_err(dd,
  8776. "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
  8777. state, curr_state);
  8778. return -ETIMEDOUT;
  8779. }
  8780. usleep_range(1950, 2050); /* sleep 2ms-ish */
  8781. }
  8782. return 0;
  8783. }
  8784. static const char *state_completed_string(u32 completed)
  8785. {
  8786. static const char * const state_completed[] = {
  8787. "EstablishComm",
  8788. "OptimizeEQ",
  8789. "VerifyCap"
  8790. };
  8791. if (completed < ARRAY_SIZE(state_completed))
  8792. return state_completed[completed];
  8793. return "unknown";
  8794. }
  8795. static const char all_lanes_dead_timeout_expired[] =
  8796. "All lanes were inactive – was the interconnect media removed?";
  8797. static const char tx_out_of_policy[] =
  8798. "Passing lanes on local port do not meet the local link width policy";
  8799. static const char no_state_complete[] =
  8800. "State timeout occurred before link partner completed the state";
  8801. static const char * const state_complete_reasons[] = {
  8802. [0x00] = "Reason unknown",
  8803. [0x01] = "Link was halted by driver, refer to LinkDownReason",
  8804. [0x02] = "Link partner reported failure",
  8805. [0x10] = "Unable to achieve frame sync on any lane",
  8806. [0x11] =
  8807. "Unable to find a common bit rate with the link partner",
  8808. [0x12] =
  8809. "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
  8810. [0x13] =
  8811. "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
  8812. [0x14] = no_state_complete,
  8813. [0x15] =
  8814. "State timeout occurred before link partner identified equalization presets",
  8815. [0x16] =
  8816. "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
  8817. [0x17] = tx_out_of_policy,
  8818. [0x20] = all_lanes_dead_timeout_expired,
  8819. [0x21] =
  8820. "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
  8821. [0x22] = no_state_complete,
  8822. [0x23] =
  8823. "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
  8824. [0x24] = tx_out_of_policy,
  8825. [0x30] = all_lanes_dead_timeout_expired,
  8826. [0x31] =
  8827. "State timeout occurred waiting for host to process received frames",
  8828. [0x32] = no_state_complete,
  8829. [0x33] =
  8830. "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
  8831. [0x34] = tx_out_of_policy,
  8832. };
  8833. static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
  8834. u32 code)
  8835. {
  8836. const char *str = NULL;
  8837. if (code < ARRAY_SIZE(state_complete_reasons))
  8838. str = state_complete_reasons[code];
  8839. if (str)
  8840. return str;
  8841. return "Reserved";
  8842. }
  8843. /* describe the given last state complete frame */
  8844. static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
  8845. const char *prefix)
  8846. {
  8847. struct hfi1_devdata *dd = ppd->dd;
  8848. u32 success;
  8849. u32 state;
  8850. u32 reason;
  8851. u32 lanes;
  8852. /*
  8853. * Decode frame:
  8854. * [ 0: 0] - success
  8855. * [ 3: 1] - state
  8856. * [ 7: 4] - next state timeout
  8857. * [15: 8] - reason code
  8858. * [31:16] - lanes
  8859. */
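/* e.g. frame 0x00010005: success = 1, state = 2 (VerifyCap), reason = 0x00, lanes = 0x0001 */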
  8860. success = frame & 0x1;
  8861. state = (frame >> 1) & 0x7;
  8862. reason = (frame >> 8) & 0xff;
  8863. lanes = (frame >> 16) & 0xffff;
  8864. dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
  8865. prefix, frame);
8866. dd_dev_err(dd, " last reported state: %s (0x%x)\n",
  8867. state_completed_string(state), state);
  8868. dd_dev_err(dd, " state successfully completed: %s\n",
  8869. success ? "yes" : "no");
  8870. dd_dev_err(dd, " fail reason 0x%x: %s\n",
  8871. reason, state_complete_reason_code_string(ppd, reason));
  8872. dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
  8873. }
  8874. /*
  8875. * Read the last state complete frames and explain them. This routine
  8876. * expects to be called if the link went down during link negotiation
  8877. * and initialization (LNI). That is, anywhere between polling and link up.
  8878. */
  8879. static void check_lni_states(struct hfi1_pportdata *ppd)
  8880. {
  8881. u32 last_local_state;
  8882. u32 last_remote_state;
  8883. read_last_local_state(ppd->dd, &last_local_state);
  8884. read_last_remote_state(ppd->dd, &last_remote_state);
  8885. /*
  8886. * Don't report anything if there is nothing to report. A value of
  8887. * 0 means the link was taken down while polling and there was no
  8888. * training in-process.
  8889. */
  8890. if (last_local_state == 0 && last_remote_state == 0)
  8891. return;
  8892. decode_state_complete(ppd, last_local_state, "transmitted");
  8893. decode_state_complete(ppd, last_remote_state, "received");
  8894. }
  8895. /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
  8896. static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
  8897. {
  8898. u64 reg;
  8899. unsigned long timeout;
  8900. /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
  8901. timeout = jiffies + msecs_to_jiffies(wait_ms);
  8902. while (1) {
  8903. reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
  8904. if (reg)
  8905. break;
  8906. if (time_after(jiffies, timeout)) {
  8907. dd_dev_err(dd,
  8908. "timeout waiting for LINK_TRANSFER_ACTIVE\n");
  8909. return -ETIMEDOUT;
  8910. }
  8911. udelay(2);
  8912. }
  8913. return 0;
  8914. }
  8915. /* called when the logical link state is not down as it should be */
  8916. static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
  8917. {
  8918. struct hfi1_devdata *dd = ppd->dd;
  8919. /*
  8920. * Bring link up in LCB loopback
  8921. */
  8922. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
  8923. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
  8924. DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
  8925. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
  8926. write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
  8927. write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
  8928. write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
  8929. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  8930. (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
  8931. udelay(3);
  8932. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
  8933. write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
  8934. wait_link_transfer_active(dd, 100);
  8935. /*
  8936. * Bring the link down again.
  8937. */
  8938. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
  8939. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
  8940. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
  8941. /* call again to adjust ppd->statusp, if needed */
  8942. get_logical_state(ppd);
  8943. }
  8944. /*
  8945. * Helper for set_link_state(). Do not call except from that routine.
  8946. * Expects ppd->hls_mutex to be held.
  8947. *
  8948. * @rem_reason value to be sent to the neighbor
  8949. *
  8950. * LinkDownReasons only set if transition succeeds.
  8951. */
  8952. static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
  8953. {
  8954. struct hfi1_devdata *dd = ppd->dd;
  8955. u32 pstate, previous_state;
  8956. int ret;
  8957. int do_transition;
  8958. int do_wait;
  8959. update_lcb_cache(dd);
  8960. previous_state = ppd->host_link_state;
  8961. ppd->host_link_state = HLS_GOING_OFFLINE;
  8962. pstate = read_physical_state(dd);
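/*
 * Offline transient substates are detected below by comparing only
 * the upper nibble (0xf0) of the physical state against PLS_OFFLINE.
 */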
  8963. if (pstate == PLS_OFFLINE) {
  8964. do_transition = 0; /* in right state */
  8965. do_wait = 0; /* ...no need to wait */
  8966. } else if ((pstate & 0xf0) == PLS_OFFLINE) {
  8967. do_transition = 0; /* in an offline transient state */
  8968. do_wait = 1; /* ...wait for it to settle */
  8969. } else {
  8970. do_transition = 1; /* need to move to offline */
  8971. do_wait = 1; /* ...will need to wait */
  8972. }
  8973. if (do_transition) {
  8974. ret = set_physical_link_state(dd,
  8975. (rem_reason << 8) | PLS_OFFLINE);
  8976. if (ret != HCMD_SUCCESS) {
  8977. dd_dev_err(dd,
  8978. "Failed to transition to Offline link state, return %d\n",
  8979. ret);
  8980. return -EINVAL;
  8981. }
  8982. if (ppd->offline_disabled_reason ==
  8983. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
  8984. ppd->offline_disabled_reason =
  8985. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
  8986. }
  8987. if (do_wait) {
  8988. /* it can take a while for the link to go down */
  8989. ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
  8990. if (ret < 0)
  8991. return ret;
  8992. }
  8993. /*
  8994. * Now in charge of LCB - must be after the physical state is
  8995. * offline.quiet and before host_link_state is changed.
  8996. */
  8997. set_host_lcb_access(dd);
  8998. write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
  8999. /* make sure the logical state is also down */
  9000. ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
  9001. if (ret)
  9002. force_logical_link_state_down(ppd);
  9003. ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
  9004. if (ppd->port_type == PORT_TYPE_QSFP &&
  9005. ppd->qsfp_info.limiting_active &&
  9006. qsfp_mod_present(ppd)) {
  9007. int ret;
  9008. ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
  9009. if (ret == 0) {
  9010. set_qsfp_tx(ppd, 0);
  9011. release_chip_resource(dd, qsfp_resource(dd));
  9012. } else {
  9013. /* not fatal, but should warn */
  9014. dd_dev_err(dd,
  9015. "Unable to acquire lock to turn off QSFP TX\n");
  9016. }
  9017. }
  9018. /*
  9019. * The LNI has a mandatory wait time after the physical state
  9020. * moves to Offline.Quiet. The wait time may be different
  9021. * depending on how the link went down. The 8051 firmware
  9022. * will observe the needed wait time and only move to ready
  9023. * when that is completed. The largest of the quiet timeouts
  9024. * is 6s, so wait that long and then at least 0.5s more for
  9025. * other transitions, and another 0.5s for a buffer.
  9026. */
  9027. ret = wait_fm_ready(dd, 7000);
  9028. if (ret) {
  9029. dd_dev_err(dd,
  9030. "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
  9031. /* state is really offline, so make it so */
  9032. ppd->host_link_state = HLS_DN_OFFLINE;
  9033. return ret;
  9034. }
  9035. /*
  9036. * The state is now offline and the 8051 is ready to accept host
  9037. * requests.
  9038. * - change our state
  9039. * - notify others if we were previously in a linkup state
  9040. */
  9041. ppd->host_link_state = HLS_DN_OFFLINE;
  9042. if (previous_state & HLS_UP) {
  9043. /* went down while link was up */
  9044. handle_linkup_change(dd, 0);
  9045. } else if (previous_state
  9046. & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
  9047. /* went down while attempting link up */
  9048. check_lni_states(ppd);
  9049. }
  9050. /* the active link width (downgrade) is 0 on link down */
  9051. ppd->link_width_active = 0;
  9052. ppd->link_width_downgrade_tx_active = 0;
  9053. ppd->link_width_downgrade_rx_active = 0;
  9054. ppd->current_egress_rate = 0;
  9055. return 0;
  9056. }
  9057. /* return the link state name */
  9058. static const char *link_state_name(u32 state)
  9059. {
  9060. const char *name;
  9061. int n = ilog2(state);
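/*
 * HLS_* states are one-hot masks; ilog2() recovers the bit position
 * (the __HLS_*_BP value) used to index names[].
 */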
  9062. static const char * const names[] = {
  9063. [__HLS_UP_INIT_BP] = "INIT",
  9064. [__HLS_UP_ARMED_BP] = "ARMED",
  9065. [__HLS_UP_ACTIVE_BP] = "ACTIVE",
  9066. [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
  9067. [__HLS_DN_POLL_BP] = "POLL",
  9068. [__HLS_DN_DISABLE_BP] = "DISABLE",
  9069. [__HLS_DN_OFFLINE_BP] = "OFFLINE",
  9070. [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
  9071. [__HLS_GOING_UP_BP] = "GOING_UP",
  9072. [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
  9073. [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
  9074. };
  9075. name = n < ARRAY_SIZE(names) ? names[n] : NULL;
  9076. return name ? name : "unknown";
  9077. }
  9078. /* return the link state reason name */
  9079. static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
  9080. {
  9081. if (state == HLS_UP_INIT) {
  9082. switch (ppd->linkinit_reason) {
  9083. case OPA_LINKINIT_REASON_LINKUP:
  9084. return "(LINKUP)";
  9085. case OPA_LINKINIT_REASON_FLAPPING:
  9086. return "(FLAPPING)";
  9087. case OPA_LINKINIT_OUTSIDE_POLICY:
  9088. return "(OUTSIDE_POLICY)";
  9089. case OPA_LINKINIT_QUARANTINED:
  9090. return "(QUARANTINED)";
  9091. case OPA_LINKINIT_INSUFIC_CAPABILITY:
  9092. return "(INSUFIC_CAPABILITY)";
  9093. default:
  9094. break;
  9095. }
  9096. }
  9097. return "";
  9098. }
  9099. /*
  9100. * driver_physical_state - convert the driver's notion of a port's
  9101. * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
  9102. * Return -1 (converted to a u32) to indicate error.
  9103. */
  9104. u32 driver_physical_state(struct hfi1_pportdata *ppd)
  9105. {
  9106. switch (ppd->host_link_state) {
  9107. case HLS_UP_INIT:
  9108. case HLS_UP_ARMED:
  9109. case HLS_UP_ACTIVE:
  9110. return IB_PORTPHYSSTATE_LINKUP;
  9111. case HLS_DN_POLL:
  9112. return IB_PORTPHYSSTATE_POLLING;
  9113. case HLS_DN_DISABLE:
  9114. return IB_PORTPHYSSTATE_DISABLED;
  9115. case HLS_DN_OFFLINE:
  9116. return OPA_PORTPHYSSTATE_OFFLINE;
  9117. case HLS_VERIFY_CAP:
  9118. return IB_PORTPHYSSTATE_POLLING;
  9119. case HLS_GOING_UP:
  9120. return IB_PORTPHYSSTATE_POLLING;
  9121. case HLS_GOING_OFFLINE:
  9122. return OPA_PORTPHYSSTATE_OFFLINE;
  9123. case HLS_LINK_COOLDOWN:
  9124. return OPA_PORTPHYSSTATE_OFFLINE;
  9125. case HLS_DN_DOWNDEF:
  9126. default:
  9127. dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
  9128. ppd->host_link_state);
  9129. return -1;
  9130. }
  9131. }
  9132. /*
  9133. * driver_logical_state - convert the driver's notion of a port's
  9134. * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
  9135. * (converted to a u32) to indicate error.
  9136. */
  9137. u32 driver_logical_state(struct hfi1_pportdata *ppd)
  9138. {
  9139. if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
  9140. return IB_PORT_DOWN;
  9141. switch (ppd->host_link_state & HLS_UP) {
  9142. case HLS_UP_INIT:
  9143. return IB_PORT_INIT;
  9144. case HLS_UP_ARMED:
  9145. return IB_PORT_ARMED;
  9146. case HLS_UP_ACTIVE:
  9147. return IB_PORT_ACTIVE;
  9148. default:
  9149. dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
  9150. ppd->host_link_state);
  9151. return -1;
  9152. }
  9153. }
  9154. void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
  9155. u8 neigh_reason, u8 rem_reason)
  9156. {
  9157. if (ppd->local_link_down_reason.latest == 0 &&
  9158. ppd->neigh_link_down_reason.latest == 0) {
  9159. ppd->local_link_down_reason.latest = lcl_reason;
  9160. ppd->neigh_link_down_reason.latest = neigh_reason;
  9161. ppd->remote_link_down_reason = rem_reason;
  9162. }
  9163. }
  9164. /*
  9165. * Change the physical and/or logical link state.
  9166. *
  9167. * Do not call this routine while inside an interrupt. It contains
  9168. * calls to routines that can take multiple seconds to finish.
  9169. *
  9170. * Returns 0 on success, -errno on failure.
  9171. */
  9172. int set_link_state(struct hfi1_pportdata *ppd, u32 state)
  9173. {
  9174. struct hfi1_devdata *dd = ppd->dd;
  9175. struct ib_event event = {.device = NULL};
  9176. int ret1, ret = 0;
  9177. int orig_new_state, poll_bounce;
  9178. mutex_lock(&ppd->hls_lock);
  9179. orig_new_state = state;
  9180. if (state == HLS_DN_DOWNDEF)
  9181. state = dd->link_default;
  9182. /* interpret poll -> poll as a link bounce */
  9183. poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
  9184. state == HLS_DN_POLL;
  9185. dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
  9186. link_state_name(ppd->host_link_state),
  9187. link_state_name(orig_new_state),
  9188. poll_bounce ? "(bounce) " : "",
  9189. link_state_reason_name(ppd, state));
  9190. /*
  9191. * If we're going to a (HLS_*) link state that implies the logical
  9192. * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
  9193. * reset is_sm_config_started to 0.
  9194. */
  9195. if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
  9196. ppd->is_sm_config_started = 0;
  9197. /*
  9198. * Do nothing if the states match. Let a poll to poll link bounce
  9199. * go through.
  9200. */
  9201. if (ppd->host_link_state == state && !poll_bounce)
  9202. goto done;
  9203. switch (state) {
  9204. case HLS_UP_INIT:
  9205. if (ppd->host_link_state == HLS_DN_POLL &&
  9206. (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
  9207. /*
  9208. * Quick link up jumps from polling to here.
  9209. *
  9210. * Whether in normal or loopback mode, the
  9211. * simulator jumps from polling to link up.
  9212. * Accept that here.
  9213. */
  9214. /* OK */
  9215. } else if (ppd->host_link_state != HLS_GOING_UP) {
  9216. goto unexpected;
  9217. }
  9218. ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
  9219. if (ret) {
  9220. dd_dev_err(dd,
  9221. "%s: logical state did not change to INIT\n",
  9222. __func__);
  9223. } else {
  9224. /* clear old transient LINKINIT_REASON code */
  9225. if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
  9226. ppd->linkinit_reason =
  9227. OPA_LINKINIT_REASON_LINKUP;
  9228. /* enable the port */
  9229. add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  9230. handle_linkup_change(dd, 1);
  9231. ppd->host_link_state = HLS_UP_INIT;
  9232. }
  9233. break;
  9234. case HLS_UP_ARMED:
  9235. if (ppd->host_link_state != HLS_UP_INIT)
  9236. goto unexpected;
  9237. ppd->host_link_state = HLS_UP_ARMED;
  9238. set_logical_state(dd, LSTATE_ARMED);
  9239. ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
  9240. if (ret) {
  9241. /* logical state didn't change, stay at init */
  9242. ppd->host_link_state = HLS_UP_INIT;
  9243. dd_dev_err(dd,
  9244. "%s: logical state did not change to ARMED\n",
  9245. __func__);
  9246. }
  9247. /*
  9248. * The simulator does not currently implement SMA messages,
  9249. * so neighbor_normal is not set. Set it here when we first
  9250. * move to Armed.
  9251. */
  9252. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  9253. ppd->neighbor_normal = 1;
  9254. break;
  9255. case HLS_UP_ACTIVE:
  9256. if (ppd->host_link_state != HLS_UP_ARMED)
  9257. goto unexpected;
  9258. ppd->host_link_state = HLS_UP_ACTIVE;
  9259. set_logical_state(dd, LSTATE_ACTIVE);
  9260. ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
  9261. if (ret) {
  9262. /* logical state didn't change, stay at armed */
  9263. ppd->host_link_state = HLS_UP_ARMED;
  9264. dd_dev_err(dd,
  9265. "%s: logical state did not change to ACTIVE\n",
  9266. __func__);
  9267. } else {
  9268. /* tell all engines to go running */
  9269. sdma_all_running(dd);
9270. /* Signal the IB layer that the port has gone active */
  9271. event.device = &dd->verbs_dev.rdi.ibdev;
  9272. event.element.port_num = ppd->port;
  9273. event.event = IB_EVENT_PORT_ACTIVE;
  9274. }
  9275. break;
  9276. case HLS_DN_POLL:
  9277. if ((ppd->host_link_state == HLS_DN_DISABLE ||
  9278. ppd->host_link_state == HLS_DN_OFFLINE) &&
  9279. dd->dc_shutdown)
  9280. dc_start(dd);
  9281. /* Hand LED control to the DC */
  9282. write_csr(dd, DCC_CFG_LED_CNTRL, 0);
  9283. if (ppd->host_link_state != HLS_DN_OFFLINE) {
  9284. u8 tmp = ppd->link_enabled;
  9285. ret = goto_offline(ppd, ppd->remote_link_down_reason);
  9286. if (ret) {
  9287. ppd->link_enabled = tmp;
  9288. break;
  9289. }
  9290. ppd->remote_link_down_reason = 0;
  9291. if (ppd->driver_link_ready)
  9292. ppd->link_enabled = 1;
  9293. }
  9294. set_all_slowpath(ppd->dd);
  9295. ret = set_local_link_attributes(ppd);
  9296. if (ret)
  9297. break;
  9298. ppd->port_error_action = 0;
  9299. ppd->host_link_state = HLS_DN_POLL;
  9300. if (quick_linkup) {
  9301. /* quick linkup does not go into polling */
  9302. ret = do_quick_linkup(dd);
  9303. } else {
  9304. ret1 = set_physical_link_state(dd, PLS_POLLING);
  9305. if (ret1 != HCMD_SUCCESS) {
  9306. dd_dev_err(dd,
  9307. "Failed to transition to Polling link state, return 0x%x\n",
  9308. ret1);
  9309. ret = -EINVAL;
  9310. }
  9311. }
  9312. ppd->offline_disabled_reason =
  9313. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
  9314. /*
  9315. * If an error occurred above, go back to offline. The
  9316. * caller may reschedule another attempt.
  9317. */
  9318. if (ret)
  9319. goto_offline(ppd, 0);
  9320. break;
  9321. case HLS_DN_DISABLE:
  9322. /* link is disabled */
  9323. ppd->link_enabled = 0;
  9324. /* allow any state to transition to disabled */
  9325. /* must transition to offline first */
  9326. if (ppd->host_link_state != HLS_DN_OFFLINE) {
  9327. ret = goto_offline(ppd, ppd->remote_link_down_reason);
  9328. if (ret)
  9329. break;
  9330. ppd->remote_link_down_reason = 0;
  9331. }
  9332. if (!dd->dc_shutdown) {
  9333. ret1 = set_physical_link_state(dd, PLS_DISABLED);
  9334. if (ret1 != HCMD_SUCCESS) {
  9335. dd_dev_err(dd,
  9336. "Failed to transition to Disabled link state, return 0x%x\n",
  9337. ret1);
  9338. ret = -EINVAL;
  9339. break;
  9340. }
  9341. dc_shutdown(dd);
  9342. }
  9343. ppd->host_link_state = HLS_DN_DISABLE;
  9344. break;
  9345. case HLS_DN_OFFLINE:
  9346. if (ppd->host_link_state == HLS_DN_DISABLE)
  9347. dc_start(dd);
  9348. /* allow any state to transition to offline */
  9349. ret = goto_offline(ppd, ppd->remote_link_down_reason);
  9350. if (!ret)
  9351. ppd->remote_link_down_reason = 0;
  9352. break;
  9353. case HLS_VERIFY_CAP:
  9354. if (ppd->host_link_state != HLS_DN_POLL)
  9355. goto unexpected;
  9356. ppd->host_link_state = HLS_VERIFY_CAP;
  9357. break;
  9358. case HLS_GOING_UP:
  9359. if (ppd->host_link_state != HLS_VERIFY_CAP)
  9360. goto unexpected;
  9361. ret1 = set_physical_link_state(dd, PLS_LINKUP);
  9362. if (ret1 != HCMD_SUCCESS) {
  9363. dd_dev_err(dd,
  9364. "Failed to transition to link up state, return 0x%x\n",
  9365. ret1);
  9366. ret = -EINVAL;
  9367. break;
  9368. }
  9369. ppd->host_link_state = HLS_GOING_UP;
  9370. break;
  9371. case HLS_GOING_OFFLINE: /* transient within goto_offline() */
  9372. case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
  9373. default:
  9374. dd_dev_info(dd, "%s: state 0x%x: not supported\n",
  9375. __func__, state);
  9376. ret = -EINVAL;
  9377. break;
  9378. }
  9379. goto done;
  9380. unexpected:
  9381. dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
  9382. __func__, link_state_name(ppd->host_link_state),
  9383. link_state_name(state));
  9384. ret = -EINVAL;
  9385. done:
  9386. mutex_unlock(&ppd->hls_lock);
  9387. if (event.device)
  9388. ib_dispatch_event(&event);
  9389. return ret;
  9390. }
  9391. int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
  9392. {
  9393. u64 reg;
  9394. int ret = 0;
  9395. switch (which) {
  9396. case HFI1_IB_CFG_LIDLMC:
  9397. set_lidlmc(ppd);
  9398. break;
  9399. case HFI1_IB_CFG_VL_HIGH_LIMIT:
  9400. /*
  9401. * The VL Arbitrator high limit is sent in units of 4k
  9402. * bytes, while HFI stores it in units of 64 bytes.
  9403. */
  9404. val *= 4096 / 64;
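/* 4096 / 64 == 64, so a limit of 1 (4KB) is stored as 64 units of 64 bytes */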
  9405. reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
  9406. << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
  9407. write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
  9408. break;
  9409. case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  9410. /* HFI only supports POLL as the default link down state */
  9411. if (val != HLS_DN_POLL)
  9412. ret = -EINVAL;
  9413. break;
  9414. case HFI1_IB_CFG_OP_VLS:
  9415. if (ppd->vls_operational != val) {
  9416. ppd->vls_operational = val;
  9417. if (!ppd->port)
  9418. ret = -EINVAL;
  9419. }
  9420. break;
  9421. /*
  9422. * For link width, link width downgrade, and speed enable, always AND
  9423. * the setting with what is actually supported. This has two benefits.
  9424. * First, enabled can't have unsupported values, no matter what the
  9425. * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
  9426. * "fill in with your supported value" have all the bits in the
  9427. * field set, so simply ANDing with supported has the desired result.
  9428. */
  9429. case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
  9430. ppd->link_width_enabled = val & ppd->link_width_supported;
  9431. break;
  9432. case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
  9433. ppd->link_width_downgrade_enabled =
  9434. val & ppd->link_width_downgrade_supported;
  9435. break;
  9436. case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
  9437. ppd->link_speed_enabled = val & ppd->link_speed_supported;
  9438. break;
  9439. case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  9440. /*
  9441. * HFI does not follow IB specs, save this value
  9442. * so we can report it, if asked.
  9443. */
  9444. ppd->overrun_threshold = val;
  9445. break;
  9446. case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  9447. /*
  9448. * HFI does not follow IB specs, save this value
  9449. * so we can report it, if asked.
  9450. */
  9451. ppd->phy_error_threshold = val;
  9452. break;
  9453. case HFI1_IB_CFG_MTU:
  9454. set_send_length(ppd);
  9455. break;
  9456. case HFI1_IB_CFG_PKEYS:
  9457. if (HFI1_CAP_IS_KSET(PKEY_CHECK))
  9458. set_partition_keys(ppd);
  9459. break;
  9460. default:
  9461. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  9462. dd_dev_info(ppd->dd,
  9463. "%s: which %s, val 0x%x: not implemented\n",
  9464. __func__, ib_cfg_name(which), val);
  9465. break;
  9466. }
  9467. return ret;
  9468. }
  9469. /* begin functions related to vl arbitration table caching */
  9470. static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
  9471. {
  9472. int i;
  9473. BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
  9474. VL_ARB_LOW_PRIO_TABLE_SIZE);
  9475. BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
  9476. VL_ARB_HIGH_PRIO_TABLE_SIZE);
  9477. /*
  9478. * Note that we always return values directly from the
  9479. * 'vl_arb_cache' (and do no CSR reads) in response to a
  9480. * 'Get(VLArbTable)'. This is obviously correct after a
  9481. * 'Set(VLArbTable)', since the cache will then be up to
  9482. * date. But it's also correct prior to any 'Set(VLArbTable)'
  9483. * since then both the cache, and the relevant h/w registers
  9484. * will be zeroed.
  9485. */
  9486. for (i = 0; i < MAX_PRIO_TABLE; i++)
  9487. spin_lock_init(&ppd->vl_arb_cache[i].lock);
  9488. }
  9489. /*
  9490. * vl_arb_lock_cache
  9491. *
  9492. * All other vl_arb_* functions should be called only after locking
  9493. * the cache.
  9494. */
  9495. static inline struct vl_arb_cache *
  9496. vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
  9497. {
  9498. if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
  9499. return NULL;
  9500. spin_lock(&ppd->vl_arb_cache[idx].lock);
  9501. return &ppd->vl_arb_cache[idx];
  9502. }
  9503. static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
  9504. {
  9505. spin_unlock(&ppd->vl_arb_cache[idx].lock);
  9506. }
  9507. static void vl_arb_get_cache(struct vl_arb_cache *cache,
  9508. struct ib_vl_weight_elem *vl)
  9509. {
  9510. memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
  9511. }
  9512. static void vl_arb_set_cache(struct vl_arb_cache *cache,
  9513. struct ib_vl_weight_elem *vl)
  9514. {
  9515. memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
  9516. }
  9517. static int vl_arb_match_cache(struct vl_arb_cache *cache,
  9518. struct ib_vl_weight_elem *vl)
  9519. {
  9520. return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
  9521. }
  9522. /* end functions related to vl arbitration table caching */
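/*
 * Write a VL arbitration table (low or high priority list) to hardware.
 * 'target' is the CSR of the first list entry and 'size' is the number of
 * vl/weight elements to write. When the link is up (and the chip is not
 * A-step), the data VLs are stopped and drained first so that a packet is
 * not stranded on a VL whose weight is being set to 0.
 */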
  9523. static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
  9524. u32 size, struct ib_vl_weight_elem *vl)
  9525. {
  9526. struct hfi1_devdata *dd = ppd->dd;
  9527. u64 reg;
  9528. unsigned int i, is_up = 0;
  9529. int drain, ret = 0;
  9530. mutex_lock(&ppd->hls_lock);
  9531. if (ppd->host_link_state & HLS_UP)
  9532. is_up = 1;
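/* only drain when the link is up; A-step parts cannot stop/drain data VLs */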
  9533. drain = !is_ax(dd) && is_up;
  9534. if (drain)
  9535. /*
  9536. * Before adjusting VL arbitration weights, empty per-VL
  9537. * FIFOs, otherwise a packet whose VL weight is being
  9538. * set to 0 could get stuck in a FIFO with no chance to
  9539. * egress.
  9540. */
  9541. ret = stop_drain_data_vls(dd);
  9542. if (ret) {
  9543. dd_dev_err(
  9544. dd,
  9545. "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
  9546. __func__);
  9547. goto err;
  9548. }
  9549. for (i = 0; i < size; i++, vl++) {
  9550. /*
  9551. * NOTE: The low priority shift and mask are used here, but
  9552. * they are the same for both the low and high registers.
  9553. */
  9554. reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
  9555. << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
  9556. | (((u64)vl->weight
  9557. & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
  9558. << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
  9559. write_csr(dd, target + (i * 8), reg);
  9560. }
  9561. pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
  9562. if (drain)
  9563. open_fill_data_vls(dd); /* reopen all VLs */
  9564. err:
  9565. mutex_unlock(&ppd->hls_lock);
  9566. return ret;
  9567. }
  9568. /*
  9569. * Read one credit merge VL register.
  9570. */
  9571. static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
  9572. struct vl_limit *vll)
  9573. {
  9574. u64 reg = read_csr(dd, csr);
  9575. vll->dedicated = cpu_to_be16(
  9576. (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
  9577. & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
  9578. vll->shared = cpu_to_be16(
  9579. (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
  9580. & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
  9581. }
  9582. /*
  9583. * Read the current credit merge limits.
  9584. */
  9585. static int get_buffer_control(struct hfi1_devdata *dd,
  9586. struct buffer_control *bc, u16 *overall_limit)
  9587. {
  9588. u64 reg;
  9589. int i;
  9590. /* not all entries are filled in */
  9591. memset(bc, 0, sizeof(*bc));
  9592. /* OPA and HFI have a 1-1 mapping */
  9593. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  9594. read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
  9595. /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
  9596. read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
  9597. reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  9598. bc->overall_shared_limit = cpu_to_be16(
  9599. (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
  9600. & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
  9601. if (overall_limit)
  9602. *overall_limit = (reg
  9603. >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
  9604. & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
  9605. return sizeof(struct buffer_control);
  9606. }
  9607. static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
  9608. {
  9609. u64 reg;
  9610. int i;
  9611. /* each register contains 16 SC->VLnt mappings, 4 bits each */
  9612. reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
  9613. for (i = 0; i < sizeof(u64); i++) {
  9614. u8 byte = *(((u8 *)&reg) + i);
  9615. dp->vlnt[2 * i] = byte & 0xf;
  9616. dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
  9617. }
  9618. reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
  9619. for (i = 0; i < sizeof(u64); i++) {
  9620. u8 byte = *(((u8 *)&reg) + i);
  9621. dp->vlnt[16 + (2 * i)] = byte & 0xf;
  9622. dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
  9623. }
  9624. return sizeof(struct sc2vlnt);
  9625. }
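/*
 * Fill a VL arbitration preemption element table with "empty" entries
 * (VL 0xf, weight 0). The preemption table is not read from hardware;
 * a fixed default is reported.
 */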
  9626. static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
  9627. struct ib_vl_weight_elem *vl)
  9628. {
  9629. unsigned int i;
  9630. for (i = 0; i < nelems; i++, vl++) {
  9631. vl->vl = 0xf;
  9632. vl->weight = 0;
  9633. }
  9634. }
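/*
 * Write the SC to VLnt mapping: 32 entries of 4 bits each, split across
 * the two DCC_CFG_SC_VL_TABLE CSRs (the inverse of get_sc2vlnt() above).
 */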
  9635. static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
  9636. {
  9637. write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
  9638. DC_SC_VL_VAL(15_0,
  9639. 0, dp->vlnt[0] & 0xf,
  9640. 1, dp->vlnt[1] & 0xf,
  9641. 2, dp->vlnt[2] & 0xf,
  9642. 3, dp->vlnt[3] & 0xf,
  9643. 4, dp->vlnt[4] & 0xf,
  9644. 5, dp->vlnt[5] & 0xf,
  9645. 6, dp->vlnt[6] & 0xf,
  9646. 7, dp->vlnt[7] & 0xf,
  9647. 8, dp->vlnt[8] & 0xf,
  9648. 9, dp->vlnt[9] & 0xf,
  9649. 10, dp->vlnt[10] & 0xf,
  9650. 11, dp->vlnt[11] & 0xf,
  9651. 12, dp->vlnt[12] & 0xf,
  9652. 13, dp->vlnt[13] & 0xf,
  9653. 14, dp->vlnt[14] & 0xf,
  9654. 15, dp->vlnt[15] & 0xf));
  9655. write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
  9656. DC_SC_VL_VAL(31_16,
  9657. 16, dp->vlnt[16] & 0xf,
  9658. 17, dp->vlnt[17] & 0xf,
  9659. 18, dp->vlnt[18] & 0xf,
  9660. 19, dp->vlnt[19] & 0xf,
  9661. 20, dp->vlnt[20] & 0xf,
  9662. 21, dp->vlnt[21] & 0xf,
  9663. 22, dp->vlnt[22] & 0xf,
  9664. 23, dp->vlnt[23] & 0xf,
  9665. 24, dp->vlnt[24] & 0xf,
  9666. 25, dp->vlnt[25] & 0xf,
  9667. 26, dp->vlnt[26] & 0xf,
  9668. 27, dp->vlnt[27] & 0xf,
  9669. 28, dp->vlnt[28] & 0xf,
  9670. 29, dp->vlnt[29] & 0xf,
  9671. 30, dp->vlnt[30] & 0xf,
  9672. 31, dp->vlnt[31] & 0xf));
  9673. }
  9674. static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
  9675. u16 limit)
  9676. {
  9677. if (limit != 0)
  9678. dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
  9679. what, (int)limit, idx);
  9680. }
9681. /* change only the shared limit portion of SendCmGlobalCredit */
  9682. static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
  9683. {
  9684. u64 reg;
  9685. reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  9686. reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
  9687. reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
  9688. write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
  9689. }
9690. /* change only the total credit limit portion of SendCmGlobalCredit */
  9691. static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
  9692. {
  9693. u64 reg;
  9694. reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  9695. reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
  9696. reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
  9697. write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
  9698. }
  9699. /* set the given per-VL shared limit */
  9700. static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
  9701. {
  9702. u64 reg;
  9703. u32 addr;
  9704. if (vl < TXE_NUM_DATA_VL)
  9705. addr = SEND_CM_CREDIT_VL + (8 * vl);
  9706. else
  9707. addr = SEND_CM_CREDIT_VL15;
  9708. reg = read_csr(dd, addr);
  9709. reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
  9710. reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
  9711. write_csr(dd, addr, reg);
  9712. }
  9713. /* set the given per-VL dedicated limit */
  9714. static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
  9715. {
  9716. u64 reg;
  9717. u32 addr;
  9718. if (vl < TXE_NUM_DATA_VL)
  9719. addr = SEND_CM_CREDIT_VL + (8 * vl);
  9720. else
  9721. addr = SEND_CM_CREDIT_VL15;
  9722. reg = read_csr(dd, addr);
  9723. reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
  9724. reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
  9725. write_csr(dd, addr, reg);
  9726. }
  9727. /* spin until the given per-VL status mask bits clear */
  9728. static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
  9729. const char *which)
  9730. {
  9731. unsigned long timeout;
  9732. u64 reg;
  9733. timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
  9734. while (1) {
  9735. reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
  9736. if (reg == 0)
  9737. return; /* success */
  9738. if (time_after(jiffies, timeout))
  9739. break; /* timed out */
  9740. udelay(1);
  9741. }
  9742. dd_dev_err(dd,
  9743. "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
  9744. which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
  9745. /*
  9746. * If this occurs, it is likely there was a credit loss on the link.
  9747. * The only recovery from that is a link bounce.
  9748. */
  9749. dd_dev_err(dd,
  9750. "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
  9751. }
  9752. /*
  9753. * The number of credits on the VLs may be changed while everything
  9754. * is "live", but the following algorithm must be followed due to
  9755. * how the hardware is actually implemented. In particular,
  9756. * Return_Credit_Status[] is the only correct status check.
  9757. *
  9758. * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
  9759. * set Global_Shared_Credit_Limit = 0
  9760. * use_all_vl = 1
  9761. * mask0 = all VLs that are changing either dedicated or shared limits
  9762. * set Shared_Limit[mask0] = 0
  9763. * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
  9764. * if (changing any dedicated limit)
  9765. * mask1 = all VLs that are lowering dedicated limits
  9766. * lower Dedicated_Limit[mask1]
  9767. * spin until Return_Credit_Status[mask1] == 0
  9768. * raise Dedicated_Limits
  9769. * raise Shared_Limits
  9770. * raise Global_Shared_Credit_Limit
  9771. *
  9772. * lower = if the new limit is lower, set the limit to the new value
  9773. * raise = if the new limit is higher than the current value (may be changed
  9774. * earlier in the algorithm), set the new limit to the new value
  9775. */
  9776. int set_buffer_control(struct hfi1_pportdata *ppd,
  9777. struct buffer_control *new_bc)
  9778. {
  9779. struct hfi1_devdata *dd = ppd->dd;
  9780. u64 changing_mask, ld_mask, stat_mask;
  9781. int change_count;
  9782. int i, use_all_mask;
  9783. int this_shared_changing;
  9784. int vl_count = 0, ret;
  9785. /*
  9786. * A0: add the variable any_shared_limit_changing below and in the
  9787. * algorithm above. If removing A0 support, it can be removed.
  9788. */
  9789. int any_shared_limit_changing;
  9790. struct buffer_control cur_bc;
  9791. u8 changing[OPA_MAX_VLS];
  9792. u8 lowering_dedicated[OPA_MAX_VLS];
  9793. u16 cur_total;
  9794. u32 new_total = 0;
  9795. const u64 all_mask =
  9796. SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
  9797. | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
  9798. | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
  9799. | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
  9800. | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
  9801. | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
  9802. | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
  9803. | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
  9804. | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
  9805. #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
  9806. #define NUM_USABLE_VLS 16 /* look at VL15 and less */
  9807. /* find the new total credits, do sanity check on unused VLs */
  9808. for (i = 0; i < OPA_MAX_VLS; i++) {
  9809. if (valid_vl(i)) {
  9810. new_total += be16_to_cpu(new_bc->vl[i].dedicated);
  9811. continue;
  9812. }
  9813. nonzero_msg(dd, i, "dedicated",
  9814. be16_to_cpu(new_bc->vl[i].dedicated));
  9815. nonzero_msg(dd, i, "shared",
  9816. be16_to_cpu(new_bc->vl[i].shared));
  9817. new_bc->vl[i].dedicated = 0;
  9818. new_bc->vl[i].shared = 0;
  9819. }
  9820. new_total += be16_to_cpu(new_bc->overall_shared_limit);
  9821. /* fetch the current values */
  9822. get_buffer_control(dd, &cur_bc, &cur_total);
  9823. /*
  9824. * Create the masks we will use.
  9825. */
  9826. memset(changing, 0, sizeof(changing));
  9827. memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
  9828. /*
  9829. * NOTE: Assumes that the individual VL bits are adjacent and in
  9830. * increasing order
  9831. */
  9832. stat_mask =
  9833. SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
  9834. changing_mask = 0;
  9835. ld_mask = 0;
  9836. change_count = 0;
  9837. any_shared_limit_changing = 0;
  9838. for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
  9839. if (!valid_vl(i))
  9840. continue;
  9841. this_shared_changing = new_bc->vl[i].shared
  9842. != cur_bc.vl[i].shared;
  9843. if (this_shared_changing)
  9844. any_shared_limit_changing = 1;
  9845. if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
  9846. this_shared_changing) {
  9847. changing[i] = 1;
  9848. changing_mask |= stat_mask;
  9849. change_count++;
  9850. }
  9851. if (be16_to_cpu(new_bc->vl[i].dedicated) <
  9852. be16_to_cpu(cur_bc.vl[i].dedicated)) {
  9853. lowering_dedicated[i] = 1;
  9854. ld_mask |= stat_mask;
  9855. }
  9856. }
  9857. /* bracket the credit change with a total adjustment */
  9858. if (new_total > cur_total)
  9859. set_global_limit(dd, new_total);
  9860. /*
  9861. * Start the credit change algorithm.
  9862. */
  9863. use_all_mask = 0;
  9864. if ((be16_to_cpu(new_bc->overall_shared_limit) <
  9865. be16_to_cpu(cur_bc.overall_shared_limit)) ||
  9866. (is_ax(dd) && any_shared_limit_changing)) {
  9867. set_global_shared(dd, 0);
  9868. cur_bc.overall_shared_limit = 0;
  9869. use_all_mask = 1;
  9870. }
  9871. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9872. if (!valid_vl(i))
  9873. continue;
  9874. if (changing[i]) {
  9875. set_vl_shared(dd, i, 0);
  9876. cur_bc.vl[i].shared = 0;
  9877. }
  9878. }
  9879. wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
  9880. "shared");
  9881. if (change_count > 0) {
  9882. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9883. if (!valid_vl(i))
  9884. continue;
  9885. if (lowering_dedicated[i]) {
  9886. set_vl_dedicated(dd, i,
  9887. be16_to_cpu(new_bc->
  9888. vl[i].dedicated));
  9889. cur_bc.vl[i].dedicated =
  9890. new_bc->vl[i].dedicated;
  9891. }
  9892. }
  9893. wait_for_vl_status_clear(dd, ld_mask, "dedicated");
  9894. /* now raise all dedicated that are going up */
  9895. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9896. if (!valid_vl(i))
  9897. continue;
  9898. if (be16_to_cpu(new_bc->vl[i].dedicated) >
  9899. be16_to_cpu(cur_bc.vl[i].dedicated))
  9900. set_vl_dedicated(dd, i,
  9901. be16_to_cpu(new_bc->
  9902. vl[i].dedicated));
  9903. }
  9904. }
  9905. /* next raise all shared that are going up */
  9906. for (i = 0; i < NUM_USABLE_VLS; i++) {
  9907. if (!valid_vl(i))
  9908. continue;
  9909. if (be16_to_cpu(new_bc->vl[i].shared) >
  9910. be16_to_cpu(cur_bc.vl[i].shared))
  9911. set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
  9912. }
  9913. /* finally raise the global shared */
  9914. if (be16_to_cpu(new_bc->overall_shared_limit) >
  9915. be16_to_cpu(cur_bc.overall_shared_limit))
  9916. set_global_shared(dd,
  9917. be16_to_cpu(new_bc->overall_shared_limit));
  9918. /* bracket the credit change with a total adjustment */
  9919. if (new_total < cur_total)
  9920. set_global_limit(dd, new_total);
  9921. /*
9922. * Determine the actual number of operational VLs using the number of
  9923. * dedicated and shared credits for each VL.
  9924. */
  9925. if (change_count > 0) {
  9926. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  9927. if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
  9928. be16_to_cpu(new_bc->vl[i].shared) > 0)
  9929. vl_count++;
  9930. ppd->actual_vls_operational = vl_count;
  9931. ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
  9932. ppd->actual_vls_operational :
  9933. ppd->vls_operational,
  9934. NULL);
  9935. if (ret == 0)
  9936. ret = pio_map_init(dd, ppd->port - 1, vl_count ?
  9937. ppd->actual_vls_operational :
  9938. ppd->vls_operational, NULL);
  9939. if (ret)
  9940. return ret;
  9941. }
  9942. return 0;
  9943. }
  9944. /*
  9945. * Read the given fabric manager table. Return the size of the
  9946. * table (in bytes) on success, and a negative error code on
  9947. * failure.
  9948. */
  9949. int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
  9950. {
  9951. int size;
  9952. struct vl_arb_cache *vlc;
  9953. switch (which) {
  9954. case FM_TBL_VL_HIGH_ARB:
  9955. size = 256;
  9956. /*
  9957. * OPA specifies 128 elements (of 2 bytes each), though
  9958. * HFI supports only 16 elements in h/w.
  9959. */
  9960. vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
  9961. vl_arb_get_cache(vlc, t);
  9962. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  9963. break;
  9964. case FM_TBL_VL_LOW_ARB:
  9965. size = 256;
  9966. /*
  9967. * OPA specifies 128 elements (of 2 bytes each), though
  9968. * HFI supports only 16 elements in h/w.
  9969. */
  9970. vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
  9971. vl_arb_get_cache(vlc, t);
  9972. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  9973. break;
  9974. case FM_TBL_BUFFER_CONTROL:
  9975. size = get_buffer_control(ppd->dd, t, NULL);
  9976. break;
  9977. case FM_TBL_SC2VLNT:
  9978. size = get_sc2vlnt(ppd->dd, t);
  9979. break;
  9980. case FM_TBL_VL_PREEMPT_ELEMS:
  9981. size = 256;
  9982. /* OPA specifies 128 elements, of 2 bytes each */
  9983. get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
  9984. break;
  9985. case FM_TBL_VL_PREEMPT_MATRIX:
  9986. size = 256;
  9987. /*
  9988. * OPA specifies that this is the same size as the VL
  9989. * arbitration tables (i.e., 256 bytes).
  9990. */
  9991. break;
  9992. default:
  9993. return -EINVAL;
  9994. }
  9995. return size;
  9996. }
  9997. /*
  9998. * Write the given fabric manager table.
  9999. */
  10000. int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
  10001. {
  10002. int ret = 0;
  10003. struct vl_arb_cache *vlc;
  10004. switch (which) {
  10005. case FM_TBL_VL_HIGH_ARB:
  10006. vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
  10007. if (vl_arb_match_cache(vlc, t)) {
  10008. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  10009. break;
  10010. }
  10011. vl_arb_set_cache(vlc, t);
  10012. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  10013. ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
  10014. VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
  10015. break;
  10016. case FM_TBL_VL_LOW_ARB:
  10017. vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
  10018. if (vl_arb_match_cache(vlc, t)) {
  10019. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  10020. break;
  10021. }
  10022. vl_arb_set_cache(vlc, t);
  10023. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  10024. ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
  10025. VL_ARB_LOW_PRIO_TABLE_SIZE, t);
  10026. break;
  10027. case FM_TBL_BUFFER_CONTROL:
  10028. ret = set_buffer_control(ppd, t);
  10029. break;
  10030. case FM_TBL_SC2VLNT:
  10031. set_sc2vlnt(ppd->dd, t);
  10032. break;
  10033. default:
  10034. ret = -EINVAL;
  10035. }
  10036. return ret;
  10037. }
  10038. /*
  10039. * Disable all data VLs.
  10040. *
  10041. * Return 0 if disabled, non-zero if the VLs cannot be disabled.
  10042. */
  10043. static int disable_data_vls(struct hfi1_devdata *dd)
  10044. {
  10045. if (is_ax(dd))
  10046. return 1;
  10047. pio_send_control(dd, PSC_DATA_VL_DISABLE);
  10048. return 0;
  10049. }
  10050. /*
  10051. * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
  10052. * Just re-enables all data VLs (the "fill" part happens
  10053. * automatically - the name was chosen for symmetry with
  10054. * stop_drain_data_vls()).
  10055. *
  10056. * Return 0 if successful, non-zero if the VLs cannot be enabled.
  10057. */
  10058. int open_fill_data_vls(struct hfi1_devdata *dd)
  10059. {
  10060. if (is_ax(dd))
  10061. return 1;
  10062. pio_send_control(dd, PSC_DATA_VL_ENABLE);
  10063. return 0;
  10064. }
  10065. /*
10066. * drain_data_vls() - assumes that disable_data_vls() has been called;
10067. * waits for the occupancy of all per-VL FIFOs (for all contexts) and of the
10068. * SDMA engines to drop to 0.
  10069. */
  10070. static void drain_data_vls(struct hfi1_devdata *dd)
  10071. {
  10072. sc_wait(dd);
  10073. sdma_wait(dd);
  10074. pause_for_credit_return(dd);
  10075. }
  10076. /*
  10077. * stop_drain_data_vls() - disable, then drain all per-VL fifos.
  10078. *
  10079. * Use open_fill_data_vls() to resume using data VLs. This pair is
  10080. * meant to be used like this:
  10081. *
  10082. * stop_drain_data_vls(dd);
  10083. * // do things with per-VL resources
  10084. * open_fill_data_vls(dd);
  10085. */
  10086. int stop_drain_data_vls(struct hfi1_devdata *dd)
  10087. {
  10088. int ret;
  10089. ret = disable_data_vls(dd);
  10090. if (ret == 0)
  10091. drain_data_vls(dd);
  10092. return ret;
  10093. }
  10094. /*
  10095. * Convert a nanosecond time to a cclock count. No matter how slow
  10096. * the cclock, a non-zero ns will always have a non-zero result.
  10097. */
  10098. u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
  10099. {
  10100. u32 cclocks;
  10101. if (dd->icode == ICODE_FPGA_EMULATION)
  10102. cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
  10103. else /* simulation pretends to be ASIC */
  10104. cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
  10105. if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
  10106. cclocks = 1;
  10107. return cclocks;
  10108. }
  10109. /*
10110. * Convert a cclock count to nanoseconds. No matter how slow
10111. * the cclock, a non-zero cclock count will always have a non-zero result.
  10112. */
  10113. u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
  10114. {
  10115. u32 ns;
  10116. if (dd->icode == ICODE_FPGA_EMULATION)
  10117. ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
  10118. else /* simulation pretends to be ASIC */
  10119. ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
  10120. if (cclocks && !ns)
  10121. ns = 1;
  10122. return ns;
  10123. }
  10124. /*
  10125. * Dynamically adjust the receive interrupt timeout for a context based on
  10126. * incoming packet rate.
  10127. *
  10128. * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
  10129. */
  10130. static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
  10131. {
  10132. struct hfi1_devdata *dd = rcd->dd;
  10133. u32 timeout = rcd->rcvavail_timeout;
  10134. /*
  10135. * This algorithm doubles or halves the timeout depending on whether
10136. * the number of packets received in this interrupt was less than or
10137. * greater than or equal to the interrupt count.
  10138. *
  10139. * The calculations below do not allow a steady state to be achieved.
10140. * Only at the endpoints is it possible to have an unchanging
  10141. * timeout.
  10142. */
  10143. if (npkts < rcv_intr_count) {
  10144. /*
  10145. * Not enough packets arrived before the timeout, adjust
  10146. * timeout downward.
  10147. */
  10148. if (timeout < 2) /* already at minimum? */
  10149. return;
  10150. timeout >>= 1;
  10151. } else {
  10152. /*
  10153. * More than enough packets arrived before the timeout, adjust
  10154. * timeout upward.
  10155. */
  10156. if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
  10157. return;
  10158. timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
  10159. }
  10160. rcd->rcvavail_timeout = timeout;
  10161. /*
  10162. * timeout cannot be larger than rcv_intr_timeout_csr which has already
  10163. * been verified to be in range
  10164. */
  10165. write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
  10166. (u64)timeout <<
  10167. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
  10168. }
  10169. void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
  10170. u32 intr_adjust, u32 npkts)
  10171. {
  10172. struct hfi1_devdata *dd = rcd->dd;
  10173. u64 reg;
  10174. u32 ctxt = rcd->ctxt;
  10175. /*
  10176. * Need to write timeout register before updating RcvHdrHead to ensure
  10177. * that a new value is used when the HW decides to restart counting.
  10178. */
  10179. if (intr_adjust)
  10180. adjust_rcv_timeout(rcd, npkts);
  10181. if (updegr) {
  10182. reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
  10183. << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
  10184. write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
  10185. }
  10186. mmiowb();
  10187. reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
  10188. (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
  10189. << RCV_HDR_HEAD_HEAD_SHIFT);
  10190. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
  10191. mmiowb();
  10192. }
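/*
 * Return non-zero if the receive header queue for the given context is
 * empty, i.e. the (cached or DMA'd) tail has caught up with RcvHdrHead.
 */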
  10193. u32 hdrqempty(struct hfi1_ctxtdata *rcd)
  10194. {
  10195. u32 head, tail;
  10196. head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
  10197. & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
  10198. if (rcd->rcvhdrtail_kvaddr)
  10199. tail = get_rcvhdrtail(rcd);
  10200. else
  10201. tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
  10202. return head == tail;
  10203. }
  10204. /*
  10205. * Context Control and Receive Array encoding for buffer size:
  10206. * 0x0 invalid
  10207. * 0x1 4 KB
  10208. * 0x2 8 KB
  10209. * 0x3 16 KB
  10210. * 0x4 32 KB
  10211. * 0x5 64 KB
  10212. * 0x6 128 KB
  10213. * 0x7 256 KB
  10214. * 0x8 512 KB (Receive Array only)
  10215. * 0x9 1 MB (Receive Array only)
  10216. * 0xa 2 MB (Receive Array only)
  10217. *
  10218. * 0xB-0xF - reserved (Receive Array only)
  10219. *
  10220. *
  10221. * This routine assumes that the value has already been sanity checked.
  10222. */
  10223. static u32 encoded_size(u32 size)
  10224. {
  10225. switch (size) {
  10226. case 4 * 1024: return 0x1;
  10227. case 8 * 1024: return 0x2;
  10228. case 16 * 1024: return 0x3;
  10229. case 32 * 1024: return 0x4;
  10230. case 64 * 1024: return 0x5;
  10231. case 128 * 1024: return 0x6;
  10232. case 256 * 1024: return 0x7;
  10233. case 512 * 1024: return 0x8;
  10234. case 1 * 1024 * 1024: return 0x9;
  10235. case 2 * 1024 * 1024: return 0xa;
  10236. }
  10237. return 0x1; /* if invalid, go with the minimum size */
  10238. }
  10239. void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
  10240. {
  10241. struct hfi1_ctxtdata *rcd;
  10242. u64 rcvctrl, reg;
  10243. int did_enable = 0;
  10244. rcd = dd->rcd[ctxt];
  10245. if (!rcd)
  10246. return;
  10247. hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
  10248. rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
  10249. /* if the context already enabled, don't do the extra steps */
  10250. if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
  10251. !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
  10252. /* reset the tail and hdr addresses, and sequence count */
  10253. write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
  10254. rcd->rcvhdrq_dma);
  10255. if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
  10256. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10257. rcd->rcvhdrqtailaddr_dma);
  10258. rcd->seq_cnt = 1;
  10259. /* reset the cached receive header queue head value */
  10260. rcd->head = 0;
  10261. /*
  10262. * Zero the receive header queue so we don't get false
  10263. * positives when checking the sequence number. The
  10264. * sequence numbers could land exactly on the same spot.
  10265. * E.g. a rcd restart before the receive header wrapped.
  10266. */
  10267. memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
  10268. /* starting timeout */
  10269. rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
  10270. /* enable the context */
  10271. rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
  10272. /* clean the egr buffer size first */
  10273. rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
  10274. rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
  10275. & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
  10276. << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
  10277. /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
  10278. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
  10279. did_enable = 1;
  10280. /* zero RcvEgrIndexHead */
  10281. write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
  10282. /* set eager count and base index */
  10283. reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
  10284. & RCV_EGR_CTRL_EGR_CNT_MASK)
  10285. << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
  10286. (((rcd->eager_base >> RCV_SHIFT)
  10287. & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
  10288. << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
  10289. write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
  10290. /*
  10291. * Set TID (expected) count and base index.
  10292. * rcd->expected_count is set to individual RcvArray entries,
  10293. * not pairs, and the CSR takes a pair-count in groups of
  10294. * four, so divide by 8.
  10295. */
  10296. reg = (((rcd->expected_count >> RCV_SHIFT)
  10297. & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
  10298. << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
  10299. (((rcd->expected_base >> RCV_SHIFT)
  10300. & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
  10301. << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
  10302. write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
  10303. if (ctxt == HFI1_CTRL_CTXT)
  10304. write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
  10305. }
  10306. if (op & HFI1_RCVCTRL_CTXT_DIS) {
  10307. write_csr(dd, RCV_VL15, 0);
  10308. /*
10309. * When a receive context is being disabled, turn on tail
10310. * update with a dummy tail address, and then disable the
10311. * receive context.
  10312. */
  10313. if (dd->rcvhdrtail_dummy_dma) {
  10314. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10315. dd->rcvhdrtail_dummy_dma);
  10316. /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
  10317. rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10318. }
  10319. rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
  10320. }
  10321. if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
  10322. rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
  10323. if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
  10324. rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
  10325. if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
  10326. rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10327. if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
  10328. /* See comment on RcvCtxtCtrl.TailUpd above */
  10329. if (!(op & HFI1_RCVCTRL_CTXT_DIS))
  10330. rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10331. }
  10332. if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
  10333. rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
  10334. if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
  10335. rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
  10336. if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
  10337. /*
  10338. * In one-packet-per-eager mode, the size comes from
  10339. * the RcvArray entry.
  10340. */
  10341. rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
  10342. rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
  10343. }
  10344. if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
  10345. rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
  10346. if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
  10347. rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
  10348. if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
  10349. rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
  10350. if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
  10351. rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
  10352. if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
  10353. rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
  10354. rcd->rcvctrl = rcvctrl;
  10355. hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
  10356. write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
  10357. /* work around sticky RcvCtxtStatus.BlockedRHQFull */
  10358. if (did_enable &&
  10359. (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
  10360. reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
  10361. if (reg != 0) {
  10362. dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
  10363. ctxt, reg);
  10364. read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
  10365. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
  10366. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
  10367. read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
  10368. reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
  10369. dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
  10370. ctxt, reg, reg == 0 ? "not" : "still");
  10371. }
  10372. }
  10373. if (did_enable) {
  10374. /*
  10375. * The interrupt timeout and count must be set after
  10376. * the context is enabled to take effect.
  10377. */
  10378. /* set interrupt timeout */
  10379. write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
  10380. (u64)rcd->rcvavail_timeout <<
  10381. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
  10382. /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
  10383. reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
  10384. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
  10385. }
  10386. if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
  10387. /*
  10388. * If the context has been disabled and the Tail Update has
10388. * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
  10390. * so it doesn't contain an address that is invalid.
  10391. */
  10392. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10393. dd->rcvhdrtail_dummy_dma);
  10394. }
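/*
 * Used by sysfs to read device counters: if namep is non-NULL, return the
 * counter name buffer; otherwise read every device counter into dd->cntrs
 * and return the size of the data in bytes.
 */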
  10395. u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
  10396. {
  10397. int ret;
  10398. u64 val = 0;
  10399. if (namep) {
  10400. ret = dd->cntrnameslen;
  10401. *namep = dd->cntrnames;
  10402. } else {
  10403. const struct cntr_entry *entry;
  10404. int i, j;
  10405. ret = (dd->ndevcntrs) * sizeof(u64);
  10406. /* Get the start of the block of counters */
  10407. *cntrp = dd->cntrs;
  10408. /*
  10409. * Now go and fill in each counter in the block.
  10410. */
  10411. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10412. entry = &dev_cntrs[i];
  10413. hfi1_cdbg(CNTR, "reading %s", entry->name);
  10414. if (entry->flags & CNTR_DISABLED) {
  10415. /* Nothing */
  10416. hfi1_cdbg(CNTR, "\tDisabled\n");
  10417. } else {
  10418. if (entry->flags & CNTR_VL) {
  10419. hfi1_cdbg(CNTR, "\tPer VL\n");
  10420. for (j = 0; j < C_VL_COUNT; j++) {
  10421. val = entry->rw_cntr(entry,
  10422. dd, j,
  10423. CNTR_MODE_R,
  10424. 0);
  10425. hfi1_cdbg(
  10426. CNTR,
  10427. "\t\tRead 0x%llx for %d\n",
  10428. val, j);
  10429. dd->cntrs[entry->offset + j] =
  10430. val;
  10431. }
  10432. } else if (entry->flags & CNTR_SDMA) {
  10433. hfi1_cdbg(CNTR,
  10434. "\t Per SDMA Engine\n");
  10435. for (j = 0; j < dd->chip_sdma_engines;
  10436. j++) {
  10437. val =
  10438. entry->rw_cntr(entry, dd, j,
  10439. CNTR_MODE_R, 0);
  10440. hfi1_cdbg(CNTR,
  10441. "\t\tRead 0x%llx for %d\n",
  10442. val, j);
  10443. dd->cntrs[entry->offset + j] =
  10444. val;
  10445. }
  10446. } else {
  10447. val = entry->rw_cntr(entry, dd,
  10448. CNTR_INVALID_VL,
  10449. CNTR_MODE_R, 0);
  10450. dd->cntrs[entry->offset] = val;
  10451. hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
  10452. }
  10453. }
  10454. }
  10455. }
  10456. return ret;
  10457. }
  10458. /*
  10459. * Used by sysfs to create files for hfi stats to read
  10460. */
  10461. u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
  10462. {
  10463. int ret;
  10464. u64 val = 0;
  10465. if (namep) {
  10466. ret = ppd->dd->portcntrnameslen;
  10467. *namep = ppd->dd->portcntrnames;
  10468. } else {
  10469. const struct cntr_entry *entry;
  10470. int i, j;
  10471. ret = ppd->dd->nportcntrs * sizeof(u64);
  10472. *cntrp = ppd->cntrs;
  10473. for (i = 0; i < PORT_CNTR_LAST; i++) {
  10474. entry = &port_cntrs[i];
  10475. hfi1_cdbg(CNTR, "reading %s", entry->name);
  10476. if (entry->flags & CNTR_DISABLED) {
  10477. /* Nothing */
  10478. hfi1_cdbg(CNTR, "\tDisabled\n");
  10479. continue;
  10480. }
  10481. if (entry->flags & CNTR_VL) {
  10482. hfi1_cdbg(CNTR, "\tPer VL");
  10483. for (j = 0; j < C_VL_COUNT; j++) {
  10484. val = entry->rw_cntr(entry, ppd, j,
  10485. CNTR_MODE_R,
  10486. 0);
  10487. hfi1_cdbg(
  10488. CNTR,
  10489. "\t\tRead 0x%llx for %d",
  10490. val, j);
  10491. ppd->cntrs[entry->offset + j] = val;
  10492. }
  10493. } else {
  10494. val = entry->rw_cntr(entry, ppd,
  10495. CNTR_INVALID_VL,
  10496. CNTR_MODE_R,
  10497. 0);
  10498. ppd->cntrs[entry->offset] = val;
  10499. hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
  10500. }
  10501. }
  10502. }
  10503. return ret;
  10504. }
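/*
 * Tear down counter state: stop the synthetic stats timer, free the
 * per-port and per-device counter and name buffers, and destroy the
 * counter update workqueue.
 */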
  10505. static void free_cntrs(struct hfi1_devdata *dd)
  10506. {
  10507. struct hfi1_pportdata *ppd;
  10508. int i;
  10509. if (dd->synth_stats_timer.data)
  10510. del_timer_sync(&dd->synth_stats_timer);
  10511. dd->synth_stats_timer.data = 0;
  10512. ppd = (struct hfi1_pportdata *)(dd + 1);
  10513. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10514. kfree(ppd->cntrs);
  10515. kfree(ppd->scntrs);
  10516. free_percpu(ppd->ibport_data.rvp.rc_acks);
  10517. free_percpu(ppd->ibport_data.rvp.rc_qacks);
  10518. free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
  10519. ppd->cntrs = NULL;
  10520. ppd->scntrs = NULL;
  10521. ppd->ibport_data.rvp.rc_acks = NULL;
  10522. ppd->ibport_data.rvp.rc_qacks = NULL;
  10523. ppd->ibport_data.rvp.rc_delayed_comp = NULL;
  10524. }
  10525. kfree(dd->portcntrnames);
  10526. dd->portcntrnames = NULL;
  10527. kfree(dd->cntrs);
  10528. dd->cntrs = NULL;
  10529. kfree(dd->scntrs);
  10530. dd->scntrs = NULL;
  10531. kfree(dd->cntrnames);
  10532. dd->cntrnames = NULL;
  10533. if (dd->update_cntr_wq) {
  10534. destroy_workqueue(dd->update_cntr_wq);
  10535. dd->update_cntr_wq = NULL;
  10536. }
  10537. }
  10538. static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
  10539. u64 *psval, void *context, int vl)
  10540. {
  10541. u64 val;
  10542. u64 sval = *psval;
  10543. if (entry->flags & CNTR_DISABLED) {
  10544. dd_dev_err(dd, "Counter %s not enabled", entry->name);
  10545. return 0;
  10546. }
  10547. hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
  10548. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
10549. /* If it's a synthetic counter, there is more work we need to do */
  10550. if (entry->flags & CNTR_SYNTH) {
  10551. if (sval == CNTR_MAX) {
  10552. /* No need to read already saturated */
  10553. return CNTR_MAX;
  10554. }
  10555. if (entry->flags & CNTR_32BIT) {
  10556. /* 32bit counters can wrap multiple times */
  10557. u64 upper = sval >> 32;
  10558. u64 lower = (sval << 32) >> 32;
  10559. if (lower > val) { /* hw wrapped */
  10560. if (upper == CNTR_32BIT_MAX)
  10561. val = CNTR_MAX;
  10562. else
  10563. upper++;
  10564. }
  10565. if (val != CNTR_MAX)
  10566. val = (upper << 32) | val;
  10567. } else {
  10568. /* If we rolled we are saturated */
  10569. if ((val < sval) || (val > CNTR_MAX))
  10570. val = CNTR_MAX;
  10571. }
  10572. }
  10573. *psval = val;
  10574. hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
  10575. return val;
  10576. }
  10577. static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
  10578. struct cntr_entry *entry,
  10579. u64 *psval, void *context, int vl, u64 data)
  10580. {
  10581. u64 val;
  10582. if (entry->flags & CNTR_DISABLED) {
  10583. dd_dev_err(dd, "Counter %s not enabled", entry->name);
  10584. return 0;
  10585. }
  10586. hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
  10587. if (entry->flags & CNTR_SYNTH) {
  10588. *psval = data;
  10589. if (entry->flags & CNTR_32BIT) {
  10590. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
  10591. (data << 32) >> 32);
  10592. val = data; /* return the full 64bit value */
  10593. } else {
  10594. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
  10595. data);
  10596. }
  10597. } else {
  10598. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
  10599. }
  10600. *psval = val;
  10601. hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
  10602. return val;
  10603. }
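/*
 * Per-index wrappers: look up the counter entry and its software shadow
 * (per-VL where applicable), then defer to read_dev_port_cntr() or
 * write_dev_port_cntr().
 */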
  10604. u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
  10605. {
  10606. struct cntr_entry *entry;
  10607. u64 *sval;
  10608. entry = &dev_cntrs[index];
  10609. sval = dd->scntrs + entry->offset;
  10610. if (vl != CNTR_INVALID_VL)
  10611. sval += vl;
  10612. return read_dev_port_cntr(dd, entry, sval, dd, vl);
  10613. }
  10614. u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
  10615. {
  10616. struct cntr_entry *entry;
  10617. u64 *sval;
  10618. entry = &dev_cntrs[index];
  10619. sval = dd->scntrs + entry->offset;
  10620. if (vl != CNTR_INVALID_VL)
  10621. sval += vl;
  10622. return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
  10623. }
  10624. u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
  10625. {
  10626. struct cntr_entry *entry;
  10627. u64 *sval;
  10628. entry = &port_cntrs[index];
  10629. sval = ppd->scntrs + entry->offset;
  10630. if (vl != CNTR_INVALID_VL)
  10631. sval += vl;
  10632. if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
  10633. (index <= C_RCV_HDR_OVF_LAST)) {
  10634. /* We do not want to bother for disabled contexts */
  10635. return 0;
  10636. }
  10637. return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
  10638. }
  10639. u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
  10640. {
  10641. struct cntr_entry *entry;
  10642. u64 *sval;
  10643. entry = &port_cntrs[index];
  10644. sval = ppd->scntrs + entry->offset;
  10645. if (vl != CNTR_INVALID_VL)
  10646. sval += vl;
  10647. if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
  10648. (index <= C_RCV_HDR_OVF_LAST)) {
  10649. /* We do not want to bother for disabled contexts */
  10650. return 0;
  10651. }
  10652. return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
  10653. }
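/*
 * Workqueue handler for the synthetic counter update: check the DC
 * transmit/receive flit counters for possible 32-bit wrap and, when
 * needed, re-read every device and port counter so the 64-bit software
 * shadows stay current.
 */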
  10654. static void do_update_synth_timer(struct work_struct *work)
  10655. {
  10656. u64 cur_tx;
  10657. u64 cur_rx;
  10658. u64 total_flits;
  10659. u8 update = 0;
  10660. int i, j, vl;
  10661. struct hfi1_pportdata *ppd;
  10662. struct cntr_entry *entry;
  10663. struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
  10664. update_cntr_work);
  10665. /*
10666. * Rather than keep beating on the CSRs, pick a minimal set that we can
10667. * check to watch for potential rollover. We can do this by looking at
10668. * the number of flits sent/received. If the total flit count exceeds 32 bits,
10669. * we have to iterate over all the counters and update them.
  10670. */
  10671. entry = &dev_cntrs[C_DC_RCV_FLITS];
  10672. cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
  10673. entry = &dev_cntrs[C_DC_XMIT_FLITS];
  10674. cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
  10675. hfi1_cdbg(
  10676. CNTR,
  10677. "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
  10678. dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
  10679. if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
  10680. /*
  10681. * May not be strictly necessary to update but it won't hurt and
  10682. * simplifies the logic here.
  10683. */
  10684. update = 1;
  10685. hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
  10686. dd->unit);
  10687. } else {
  10688. total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
  10689. hfi1_cdbg(CNTR,
  10690. "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
  10691. total_flits, (u64)CNTR_32BIT_MAX);
  10692. if (total_flits >= CNTR_32BIT_MAX) {
  10693. hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
  10694. dd->unit);
  10695. update = 1;
  10696. }
  10697. }
  10698. if (update) {
  10699. hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
  10700. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10701. entry = &dev_cntrs[i];
  10702. if (entry->flags & CNTR_VL) {
  10703. for (vl = 0; vl < C_VL_COUNT; vl++)
  10704. read_dev_cntr(dd, i, vl);
  10705. } else {
  10706. read_dev_cntr(dd, i, CNTR_INVALID_VL);
  10707. }
  10708. }
  10709. ppd = (struct hfi1_pportdata *)(dd + 1);
  10710. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10711. for (j = 0; j < PORT_CNTR_LAST; j++) {
  10712. entry = &port_cntrs[j];
  10713. if (entry->flags & CNTR_VL) {
  10714. for (vl = 0; vl < C_VL_COUNT; vl++)
  10715. read_port_cntr(ppd, j, vl);
  10716. } else {
  10717. read_port_cntr(ppd, j, CNTR_INVALID_VL);
  10718. }
  10719. }
  10720. }
  10721. /*
  10722. * We want the value in the register. The goal is to keep track
  10723. * of the number of "ticks" not the counter value. In other
  10724. * words if the register rolls we want to notice it and go ahead
  10725. * and force an update.
  10726. */
  10727. entry = &dev_cntrs[C_DC_XMIT_FLITS];
  10728. dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
  10729. CNTR_MODE_R, 0);
  10730. entry = &dev_cntrs[C_DC_RCV_FLITS];
  10731. dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
  10732. CNTR_MODE_R, 0);
  10733. hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
  10734. dd->unit, dd->last_tx, dd->last_rx);
  10735. } else {
  10736. hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
  10737. }
  10738. }
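/* timer callback: kick the counter update work and re-arm the timer */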
  10739. static void update_synth_timer(unsigned long opaque)
  10740. {
  10741. struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
  10742. queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
  10743. mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
  10744. }
10745. #define C_MAX_NAME 16 /* 15 chars + one for \0 */
  10746. static int init_cntrs(struct hfi1_devdata *dd)
  10747. {
  10748. int i, rcv_ctxts, j;
  10749. size_t sz;
  10750. char *p;
  10751. char name[C_MAX_NAME];
  10752. struct hfi1_pportdata *ppd;
  10753. const char *bit_type_32 = ",32";
  10754. const int bit_type_32_sz = strlen(bit_type_32);
  10755. /* set up the stats timer; the add_timer is done at the end */
  10756. setup_timer(&dd->synth_stats_timer, update_synth_timer,
  10757. (unsigned long)dd);
  10758. /***********************/
  10759. /* per device counters */
  10760. /***********************/
10761. /* size names and determine how many we have */
  10762. dd->ndevcntrs = 0;
  10763. sz = 0;
  10764. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10765. if (dev_cntrs[i].flags & CNTR_DISABLED) {
  10766. hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
  10767. continue;
  10768. }
  10769. if (dev_cntrs[i].flags & CNTR_VL) {
  10770. dev_cntrs[i].offset = dd->ndevcntrs;
  10771. for (j = 0; j < C_VL_COUNT; j++) {
  10772. snprintf(name, C_MAX_NAME, "%s%d",
  10773. dev_cntrs[i].name, vl_from_idx(j));
  10774. sz += strlen(name);
  10775. /* Add ",32" for 32-bit counters */
  10776. if (dev_cntrs[i].flags & CNTR_32BIT)
  10777. sz += bit_type_32_sz;
  10778. sz++;
  10779. dd->ndevcntrs++;
  10780. }
  10781. } else if (dev_cntrs[i].flags & CNTR_SDMA) {
  10782. dev_cntrs[i].offset = dd->ndevcntrs;
  10783. for (j = 0; j < dd->chip_sdma_engines; j++) {
  10784. snprintf(name, C_MAX_NAME, "%s%d",
  10785. dev_cntrs[i].name, j);
  10786. sz += strlen(name);
  10787. /* Add ",32" for 32-bit counters */
  10788. if (dev_cntrs[i].flags & CNTR_32BIT)
  10789. sz += bit_type_32_sz;
  10790. sz++;
  10791. dd->ndevcntrs++;
  10792. }
  10793. } else {
  10794. /* +1 for newline. */
  10795. sz += strlen(dev_cntrs[i].name) + 1;
  10796. /* Add ",32" for 32-bit counters */
  10797. if (dev_cntrs[i].flags & CNTR_32BIT)
  10798. sz += bit_type_32_sz;
  10799. dev_cntrs[i].offset = dd->ndevcntrs;
  10800. dd->ndevcntrs++;
  10801. }
  10802. }
  10803. /* allocate space for the counter values */
  10804. dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
  10805. if (!dd->cntrs)
  10806. goto bail;
  10807. dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
  10808. if (!dd->scntrs)
  10809. goto bail;
  10810. /* allocate space for the counter names */
  10811. dd->cntrnameslen = sz;
  10812. dd->cntrnames = kmalloc(sz, GFP_KERNEL);
  10813. if (!dd->cntrnames)
  10814. goto bail;
  10815. /* fill in the names */
  10816. for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
  10817. if (dev_cntrs[i].flags & CNTR_DISABLED) {
  10818. /* Nothing */
  10819. } else if (dev_cntrs[i].flags & CNTR_VL) {
  10820. for (j = 0; j < C_VL_COUNT; j++) {
  10821. snprintf(name, C_MAX_NAME, "%s%d",
  10822. dev_cntrs[i].name,
  10823. vl_from_idx(j));
  10824. memcpy(p, name, strlen(name));
  10825. p += strlen(name);
  10826. /* Counter is 32 bits */
  10827. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10828. memcpy(p, bit_type_32, bit_type_32_sz);
  10829. p += bit_type_32_sz;
  10830. }
  10831. *p++ = '\n';
  10832. }
  10833. } else if (dev_cntrs[i].flags & CNTR_SDMA) {
  10834. for (j = 0; j < dd->chip_sdma_engines; j++) {
  10835. snprintf(name, C_MAX_NAME, "%s%d",
  10836. dev_cntrs[i].name, j);
  10837. memcpy(p, name, strlen(name));
  10838. p += strlen(name);
  10839. /* Counter is 32 bits */
  10840. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10841. memcpy(p, bit_type_32, bit_type_32_sz);
  10842. p += bit_type_32_sz;
  10843. }
  10844. *p++ = '\n';
  10845. }
  10846. } else {
  10847. memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
  10848. p += strlen(dev_cntrs[i].name);
  10849. /* Counter is 32 bits */
  10850. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10851. memcpy(p, bit_type_32, bit_type_32_sz);
  10852. p += bit_type_32_sz;
  10853. }
  10854. *p++ = '\n';
  10855. }
  10856. }
  10857. /*********************/
  10858. /* per port counters */
  10859. /*********************/
  10860. /*
  10861. * Go through the counters for the overflows and disable the ones we
  10862. * don't need. This varies based on platform so we need to do it
  10863. * dynamically here.
  10864. */
  10865. rcv_ctxts = dd->num_rcv_contexts;
  10866. for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
  10867. i <= C_RCV_HDR_OVF_LAST; i++) {
  10868. port_cntrs[i].flags |= CNTR_DISABLED;
  10869. }
10870. /* size port counter names and determine how many we have */
  10871. sz = 0;
  10872. dd->nportcntrs = 0;
  10873. for (i = 0; i < PORT_CNTR_LAST; i++) {
  10874. if (port_cntrs[i].flags & CNTR_DISABLED) {
  10875. hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
  10876. continue;
  10877. }
  10878. if (port_cntrs[i].flags & CNTR_VL) {
  10879. port_cntrs[i].offset = dd->nportcntrs;
  10880. for (j = 0; j < C_VL_COUNT; j++) {
  10881. snprintf(name, C_MAX_NAME, "%s%d",
  10882. port_cntrs[i].name, vl_from_idx(j));
  10883. sz += strlen(name);
  10884. /* Add ",32" for 32-bit counters */
  10885. if (port_cntrs[i].flags & CNTR_32BIT)
  10886. sz += bit_type_32_sz;
  10887. sz++;
  10888. dd->nportcntrs++;
  10889. }
  10890. } else {
  10891. /* +1 for newline */
  10892. sz += strlen(port_cntrs[i].name) + 1;
  10893. /* Add ",32" for 32-bit counters */
  10894. if (port_cntrs[i].flags & CNTR_32BIT)
  10895. sz += bit_type_32_sz;
  10896. port_cntrs[i].offset = dd->nportcntrs;
  10897. dd->nportcntrs++;
  10898. }
  10899. }
  10900. /* allocate space for the counter names */
  10901. dd->portcntrnameslen = sz;
  10902. dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
  10903. if (!dd->portcntrnames)
  10904. goto bail;
  10905. /* fill in port cntr names */
  10906. for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
  10907. if (port_cntrs[i].flags & CNTR_DISABLED)
  10908. continue;
  10909. if (port_cntrs[i].flags & CNTR_VL) {
  10910. for (j = 0; j < C_VL_COUNT; j++) {
  10911. snprintf(name, C_MAX_NAME, "%s%d",
  10912. port_cntrs[i].name, vl_from_idx(j));
  10913. memcpy(p, name, strlen(name));
  10914. p += strlen(name);
  10915. /* Counter is 32 bits */
  10916. if (port_cntrs[i].flags & CNTR_32BIT) {
  10917. memcpy(p, bit_type_32, bit_type_32_sz);
  10918. p += bit_type_32_sz;
  10919. }
  10920. *p++ = '\n';
  10921. }
  10922. } else {
  10923. memcpy(p, port_cntrs[i].name,
  10924. strlen(port_cntrs[i].name));
  10925. p += strlen(port_cntrs[i].name);
  10926. /* Counter is 32 bits */
  10927. if (port_cntrs[i].flags & CNTR_32BIT) {
  10928. memcpy(p, bit_type_32, bit_type_32_sz);
  10929. p += bit_type_32_sz;
  10930. }
  10931. *p++ = '\n';
  10932. }
  10933. }
  10934. /* allocate per port storage for counter values */
  10935. ppd = (struct hfi1_pportdata *)(dd + 1);
  10936. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10937. ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
  10938. if (!ppd->cntrs)
  10939. goto bail;
  10940. ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
  10941. if (!ppd->scntrs)
  10942. goto bail;
  10943. }
  10944. /* CPU counters need to be allocated and zeroed */
  10945. if (init_cpu_counters(dd))
  10946. goto bail;
  10947. dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
  10948. WQ_MEM_RECLAIM, dd->unit);
  10949. if (!dd->update_cntr_wq)
  10950. goto bail;
  10951. INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
  10952. mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
  10953. return 0;
  10954. bail:
  10955. free_cntrs(dd);
  10956. return -ENOMEM;
  10957. }
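/* map a chip logical link state to the corresponding IB port state */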
  10958. static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
  10959. {
  10960. switch (chip_lstate) {
  10961. default:
  10962. dd_dev_err(dd,
  10963. "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
  10964. chip_lstate);
  10965. /* fall through */
  10966. case LSTATE_DOWN:
  10967. return IB_PORT_DOWN;
  10968. case LSTATE_INIT:
  10969. return IB_PORT_INIT;
  10970. case LSTATE_ARMED:
  10971. return IB_PORT_ARMED;
  10972. case LSTATE_ACTIVE:
  10973. return IB_PORT_ACTIVE;
  10974. }
  10975. }
  10976. u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
  10977. {
  10978. /* look at the HFI meta-states only */
  10979. switch (chip_pstate & 0xf0) {
  10980. default:
  10981. dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
  10982. chip_pstate);
  10983. /* fall through */
  10984. case PLS_DISABLED:
  10985. return IB_PORTPHYSSTATE_DISABLED;
  10986. case PLS_OFFLINE:
  10987. return OPA_PORTPHYSSTATE_OFFLINE;
  10988. case PLS_POLLING:
  10989. return IB_PORTPHYSSTATE_POLLING;
  10990. case PLS_CONFIGPHY:
  10991. return IB_PORTPHYSSTATE_TRAINING;
  10992. case PLS_LINKUP:
  10993. return IB_PORTPHYSSTATE_LINKUP;
  10994. case PLS_PHYTEST:
  10995. return IB_PORTPHYSSTATE_PHY_TEST;
  10996. }
  10997. }
  10998. /* return the OPA port logical state name */
  10999. const char *opa_lstate_name(u32 lstate)
  11000. {
  11001. static const char * const port_logical_names[] = {
  11002. "PORT_NOP",
  11003. "PORT_DOWN",
  11004. "PORT_INIT",
  11005. "PORT_ARMED",
  11006. "PORT_ACTIVE",
  11007. "PORT_ACTIVE_DEFER",
  11008. };
  11009. if (lstate < ARRAY_SIZE(port_logical_names))
  11010. return port_logical_names[lstate];
  11011. return "unknown";
  11012. }
  11013. /* return the OPA port physical state name */
  11014. const char *opa_pstate_name(u32 pstate)
  11015. {
  11016. static const char * const port_physical_names[] = {
  11017. "PHYS_NOP",
  11018. "reserved1",
  11019. "PHYS_POLL",
  11020. "PHYS_DISABLED",
  11021. "PHYS_TRAINING",
  11022. "PHYS_LINKUP",
  11023. "PHYS_LINK_ERR_RECOVER",
  11024. "PHYS_PHY_TEST",
  11025. "reserved8",
  11026. "PHYS_OFFLINE",
  11027. "PHYS_GANGED",
  11028. "PHYS_TEST",
  11029. };
  11030. if (pstate < ARRAY_SIZE(port_physical_names))
  11031. return port_physical_names[pstate];
  11032. return "unknown";
  11033. }
  11034. /*
  11035. * Read the hardware link state and set the driver's cached value of it.
  11036. * Return the (new) current value.
  11037. */
  11038. u32 get_logical_state(struct hfi1_pportdata *ppd)
  11039. {
  11040. u32 new_state;
  11041. new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
  11042. if (new_state != ppd->lstate) {
  11043. dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
  11044. opa_lstate_name(new_state), new_state);
  11045. ppd->lstate = new_state;
  11046. }
  11047. /*
  11048. * Set port status flags in the page mapped into userspace
  11049. * memory. Do it here to ensure a reliable state - this is
  11050. * the only function called by all state handling code.
11051. * Always set the flags because the cached value
11052. * might have been changed explicitly outside of this
11053. * function.
  11054. */
  11055. if (ppd->statusp) {
  11056. switch (ppd->lstate) {
  11057. case IB_PORT_DOWN:
  11058. case IB_PORT_INIT:
  11059. *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
  11060. HFI1_STATUS_IB_READY);
  11061. break;
  11062. case IB_PORT_ARMED:
  11063. *ppd->statusp |= HFI1_STATUS_IB_CONF;
  11064. break;
  11065. case IB_PORT_ACTIVE:
  11066. *ppd->statusp |= HFI1_STATUS_IB_READY;
  11067. break;
  11068. }
  11069. }
  11070. return ppd->lstate;
  11071. }
  11072. /**
  11073. * wait_logical_linkstate - wait for an IB link state change to occur
  11074. * @ppd: port device
  11075. * @state: the state to wait for
  11076. * @msecs: the number of milliseconds to wait
  11077. *
  11078. * Wait up to msecs milliseconds for IB link state change to occur.
  11079. * For now, take the easy polling route.
  11080. * Returns 0 if state reached, otherwise -ETIMEDOUT.
  11081. */
  11082. static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
  11083. int msecs)
  11084. {
  11085. unsigned long timeout;
  11086. timeout = jiffies + msecs_to_jiffies(msecs);
  11087. while (1) {
  11088. if (get_logical_state(ppd) == state)
  11089. return 0;
  11090. if (time_after(jiffies, timeout))
  11091. break;
  11092. msleep(20);
  11093. }
  11094. dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
  11095. return -ETIMEDOUT;
  11096. }
  11097. u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
  11098. {
  11099. u32 pstate;
  11100. u32 ib_pstate;
  11101. pstate = read_physical_state(ppd->dd);
  11102. ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
  11103. if (ppd->last_pstate != ib_pstate) {
  11104. dd_dev_info(ppd->dd,
  11105. "%s: physical state changed to %s (0x%x), phy 0x%x\n",
  11106. __func__, opa_pstate_name(ib_pstate), ib_pstate,
  11107. pstate);
  11108. ppd->last_pstate = ib_pstate;
  11109. }
  11110. return ib_pstate;
  11111. }
  11112. #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
  11113. (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
  11114. #define SET_STATIC_RATE_CONTROL_SMASK(r) \
  11115. (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
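/*
 * Note the inversion in hfi1_init_ctxt() below: the CSR bit DISALLOWS
 * PBC static rate control, so when the STATIC_RATE_CTRL capability is
 * set for the context type the disallow bit is cleared, and it is set
 * when the capability is not present.
 */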
  11116. void hfi1_init_ctxt(struct send_context *sc)
  11117. {
  11118. if (sc) {
  11119. struct hfi1_devdata *dd = sc->dd;
  11120. u64 reg;
  11121. u8 set = (sc->type == SC_USER ?
  11122. HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
  11123. HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
  11124. reg = read_kctxt_csr(dd, sc->hw_context,
  11125. SEND_CTXT_CHECK_ENABLE);
  11126. if (set)
  11127. CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
  11128. else
  11129. SET_STATIC_RATE_CONTROL_SMASK(reg);
  11130. write_kctxt_csr(dd, sc->hw_context,
  11131. SEND_CTXT_CHECK_ENABLE, reg);
  11132. }
  11133. }
  11134. int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
  11135. {
  11136. int ret = 0;
  11137. u64 reg;
  11138. if (dd->icode != ICODE_RTL_SILICON) {
  11139. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  11140. dd_dev_info(dd, "%s: tempsense not supported by HW\n",
  11141. __func__);
  11142. return -EINVAL;
  11143. }
  11144. reg = read_csr(dd, ASIC_STS_THERM);
  11145. temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
  11146. ASIC_STS_THERM_CURR_TEMP_MASK);
  11147. temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
  11148. ASIC_STS_THERM_LO_TEMP_MASK);
  11149. temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
  11150. ASIC_STS_THERM_HI_TEMP_MASK);
  11151. temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
  11152. ASIC_STS_THERM_CRIT_TEMP_MASK);
  11153. /* triggers is a 3-bit value - 1 bit per trigger. */
  11154. temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
  11155. return ret;
  11156. }
  11157. /* ========================================================================= */
  11158. /*
  11159. * Enable/disable chip from delivering interrupts.
  11160. */
  11161. void set_intr_state(struct hfi1_devdata *dd, u32 enable)
  11162. {
  11163. int i;
  11164. /*
  11165. * In HFI, the mask needs to be 1 to allow interrupts.
  11166. */
  11167. if (enable) {
  11168. /* enable all interrupts */
  11169. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11170. write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
  11171. init_qsfp_int(dd);
  11172. } else {
  11173. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11174. write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
  11175. }
  11176. }
  11177. /*
  11178. * Clear all interrupt sources on the chip.
  11179. */
  11180. static void clear_all_interrupts(struct hfi1_devdata *dd)
  11181. {
  11182. int i;
  11183. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11184. write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
  11185. write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
  11186. write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
  11187. write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
  11188. write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
  11189. write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
  11190. write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
  11191. write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
  11192. for (i = 0; i < dd->chip_send_contexts; i++)
  11193. write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
  11194. for (i = 0; i < dd->chip_sdma_engines; i++)
  11195. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
  11196. write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
  11197. write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
  11198. write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
  11199. }
  11200. /* Move to pcie.c? */
  11201. static void disable_intx(struct pci_dev *pdev)
  11202. {
  11203. pci_intx(pdev, 0);
  11204. }
  11205. static void clean_up_interrupts(struct hfi1_devdata *dd)
  11206. {
  11207. int i;
  11208. /* remove irqs - must happen before disabling/turning off */
  11209. if (dd->num_msix_entries) {
  11210. /* MSI-X */
  11211. struct hfi1_msix_entry *me = dd->msix_entries;
  11212. for (i = 0; i < dd->num_msix_entries; i++, me++) {
  11213. if (!me->arg) /* => no irq, no affinity */
  11214. continue;
  11215. hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
  11216. free_irq(me->msix.vector, me->arg);
  11217. }
  11218. } else {
  11219. /* INTx */
  11220. if (dd->requested_intx_irq) {
  11221. free_irq(dd->pcidev->irq, dd);
  11222. dd->requested_intx_irq = 0;
  11223. }
  11224. }
  11225. /* turn off interrupts */
  11226. if (dd->num_msix_entries) {
  11227. /* MSI-X */
  11228. pci_disable_msix(dd->pcidev);
  11229. } else {
  11230. /* INTx */
  11231. disable_intx(dd->pcidev);
  11232. }
  11233. /* clean structures */
  11234. kfree(dd->msix_entries);
  11235. dd->msix_entries = NULL;
  11236. dd->num_msix_entries = 0;
  11237. }
  11238. /*
  11239. * Remap the interrupt source from the general handler to the given MSI-X
  11240. * interrupt.
  11241. */
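/*
 * Index math sketch: each gi_mask word covers 64 sources and each
 * CCE_INT_MAP CSR holds eight 8-bit entries. For example, chip source
 * 70 clears bit 6 of gi_mask[1], and the MSI-X vector number is
 * written into byte 6 of the register at CCE_INT_MAP + (8 * 8).
 */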
  11242. static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
  11243. {
  11244. u64 reg;
  11245. int m, n;
  11246. /* clear from the handled mask of the general interrupt */
  11247. m = isrc / 64;
  11248. n = isrc % 64;
  11249. dd->gi_mask[m] &= ~((u64)1 << n);
  11250. /* direct the chip source to the given MSI-X interrupt */
  11251. m = isrc / 8;
  11252. n = isrc % 8;
  11253. reg = read_csr(dd, CCE_INT_MAP + (8 * m));
  11254. reg &= ~((u64)0xff << (8 * n));
  11255. reg |= ((u64)msix_intr & 0xff) << (8 * n);
  11256. write_csr(dd, CCE_INT_MAP + (8 * m), reg);
  11257. }
  11258. static void remap_sdma_interrupts(struct hfi1_devdata *dd,
  11259. int engine, int msix_intr)
  11260. {
  11261. /*
11262. * SDMA engine interrupt sources are grouped by type, rather
11263. * than by engine. Per-engine interrupts are as follows:
  11264. * SDMA
  11265. * SDMAProgress
  11266. * SDMAIdle
  11267. */
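/*
 * The three calls below therefore target sources IS_SDMA_START + engine,
 * IS_SDMA_START + N + engine, and IS_SDMA_START + 2N + engine, where
 * N is TXE_NUM_SDMA_ENGINES.
 */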
  11268. remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
  11269. msix_intr);
  11270. remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
  11271. msix_intr);
  11272. remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
  11273. msix_intr);
  11274. }
  11275. static int request_intx_irq(struct hfi1_devdata *dd)
  11276. {
  11277. int ret;
  11278. snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
  11279. dd->unit);
  11280. ret = request_irq(dd->pcidev->irq, general_interrupt,
  11281. IRQF_SHARED, dd->intx_name, dd);
  11282. if (ret)
  11283. dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
  11284. ret);
  11285. else
  11286. dd->requested_intx_irq = 1;
  11287. return ret;
  11288. }
  11289. static int request_msix_irqs(struct hfi1_devdata *dd)
  11290. {
  11291. int first_general, last_general;
  11292. int first_sdma, last_sdma;
  11293. int first_rx, last_rx;
  11294. int i, ret = 0;
  11295. /* calculate the ranges we are going to use */
  11296. first_general = 0;
  11297. last_general = first_general + 1;
  11298. first_sdma = last_general;
  11299. last_sdma = first_sdma + dd->num_sdma;
  11300. first_rx = last_sdma;
  11301. last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
  11302. /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
  11303. dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
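/*
 * Resulting vector layout: vector 0 is the general interrupt, vectors
 * 1..num_sdma are the SDMA engines, the next n_krcv_queues vectors are
 * kernel receive contexts, and the final HFI1_NUM_VNIC_CTXT vectors are
 * reserved for VNIC contexts mapped later.
 */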
  11304. /*
  11305. * Sanity check - the code expects all SDMA chip source
  11306. * interrupts to be in the same CSR, starting at bit 0. Verify
  11307. * that this is true by checking the bit location of the start.
  11308. */
  11309. BUILD_BUG_ON(IS_SDMA_START % 64);
  11310. for (i = 0; i < dd->num_msix_entries; i++) {
  11311. struct hfi1_msix_entry *me = &dd->msix_entries[i];
  11312. const char *err_info;
  11313. irq_handler_t handler;
  11314. irq_handler_t thread = NULL;
  11315. void *arg = NULL;
  11316. int idx;
  11317. struct hfi1_ctxtdata *rcd = NULL;
  11318. struct sdma_engine *sde = NULL;
  11319. /* obtain the arguments to request_irq */
  11320. if (first_general <= i && i < last_general) {
  11321. idx = i - first_general;
  11322. handler = general_interrupt;
  11323. arg = dd;
  11324. snprintf(me->name, sizeof(me->name),
  11325. DRIVER_NAME "_%d", dd->unit);
  11326. err_info = "general";
  11327. me->type = IRQ_GENERAL;
  11328. } else if (first_sdma <= i && i < last_sdma) {
  11329. idx = i - first_sdma;
  11330. sde = &dd->per_sdma[idx];
  11331. handler = sdma_interrupt;
  11332. arg = sde;
  11333. snprintf(me->name, sizeof(me->name),
  11334. DRIVER_NAME "_%d sdma%d", dd->unit, idx);
  11335. err_info = "sdma";
  11336. remap_sdma_interrupts(dd, idx, i);
  11337. me->type = IRQ_SDMA;
  11338. } else if (first_rx <= i && i < last_rx) {
  11339. idx = i - first_rx;
  11340. rcd = dd->rcd[idx];
  11341. if (rcd) {
  11342. /*
  11343. * Set the interrupt register and mask for this
  11344. * context's interrupt.
  11345. */
  11346. rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
  11347. rcd->imask = ((u64)1) <<
  11348. ((IS_RCVAVAIL_START + idx) % 64);
  11349. handler = receive_context_interrupt;
  11350. thread = receive_context_thread;
  11351. arg = rcd;
  11352. snprintf(me->name, sizeof(me->name),
  11353. DRIVER_NAME "_%d kctxt%d",
  11354. dd->unit, idx);
  11355. err_info = "receive context";
  11356. remap_intr(dd, IS_RCVAVAIL_START + idx, i);
  11357. me->type = IRQ_RCVCTXT;
  11358. rcd->msix_intr = i;
  11359. }
  11360. } else {
  11361. /* not in our expected range - complain, then
  11362. * ignore it
  11363. */
  11364. dd_dev_err(dd,
  11365. "Unexpected extra MSI-X interrupt %d\n", i);
  11366. continue;
  11367. }
  11368. /* no argument, no interrupt */
  11369. if (!arg)
  11370. continue;
  11371. /* make sure the name is terminated */
  11372. me->name[sizeof(me->name) - 1] = 0;
  11373. ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
  11374. me->name, arg);
  11375. if (ret) {
  11376. dd_dev_err(dd,
  11377. "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
  11378. err_info, me->msix.vector, idx, ret);
  11379. return ret;
  11380. }
  11381. /*
  11382. * assign arg after request_irq call, so it will be
  11383. * cleaned up
  11384. */
  11385. me->arg = arg;
  11386. ret = hfi1_get_irq_affinity(dd, me);
  11387. if (ret)
  11388. dd_dev_err(dd,
  11389. "unable to pin IRQ %d\n", ret);
  11390. }
  11391. return ret;
  11392. }
  11393. void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
  11394. {
  11395. int i;
  11396. if (!dd->num_msix_entries) {
  11397. synchronize_irq(dd->pcidev->irq);
  11398. return;
  11399. }
  11400. for (i = 0; i < dd->vnic.num_ctxt; i++) {
  11401. struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
  11402. struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
  11403. synchronize_irq(me->msix.vector);
  11404. }
  11405. }
  11406. void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
  11407. {
  11408. struct hfi1_devdata *dd = rcd->dd;
  11409. struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
  11410. if (!me->arg) /* => no irq, no affinity */
  11411. return;
  11412. hfi1_put_irq_affinity(dd, me);
  11413. free_irq(me->msix.vector, me->arg);
  11414. me->arg = NULL;
  11415. }
  11416. void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
  11417. {
  11418. struct hfi1_devdata *dd = rcd->dd;
  11419. struct hfi1_msix_entry *me;
  11420. int idx = rcd->ctxt;
  11421. void *arg = rcd;
  11422. int ret;
  11423. rcd->msix_intr = dd->vnic.msix_idx++;
  11424. me = &dd->msix_entries[rcd->msix_intr];
  11425. /*
  11426. * Set the interrupt register and mask for this
  11427. * context's interrupt.
  11428. */
  11429. rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
  11430. rcd->imask = ((u64)1) <<
  11431. ((IS_RCVAVAIL_START + idx) % 64);
  11432. snprintf(me->name, sizeof(me->name),
  11433. DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
  11434. me->name[sizeof(me->name) - 1] = 0;
  11435. me->type = IRQ_RCVCTXT;
  11436. remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
  11437. ret = request_threaded_irq(me->msix.vector, receive_context_interrupt,
  11438. receive_context_thread, 0, me->name, arg);
  11439. if (ret) {
  11440. dd_dev_err(dd, "vnic irq request (vector %d, idx %d) fail %d\n",
  11441. me->msix.vector, idx, ret);
  11442. return;
  11443. }
  11444. /*
  11445. * assign arg after request_irq call, so it will be
  11446. * cleaned up
  11447. */
  11448. me->arg = arg;
  11449. ret = hfi1_get_irq_affinity(dd, me);
  11450. if (ret) {
  11451. dd_dev_err(dd,
  11452. "unable to pin IRQ %d\n", ret);
  11453. free_irq(me->msix.vector, me->arg);
  11454. }
  11455. }
  11456. /*
  11457. * Set the general handler to accept all interrupts, remap all
  11458. * chip interrupts back to MSI-X 0.
  11459. */
  11460. static void reset_interrupts(struct hfi1_devdata *dd)
  11461. {
  11462. int i;
  11463. /* all interrupts handled by the general handler */
  11464. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11465. dd->gi_mask[i] = ~(u64)0;
  11466. /* all chip interrupts map to MSI-X 0 */
  11467. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11468. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11469. }
  11470. static int set_up_interrupts(struct hfi1_devdata *dd)
  11471. {
  11472. struct hfi1_msix_entry *entries;
  11473. u32 total, request;
  11474. int i, ret;
  11475. int single_interrupt = 0; /* we expect to have all the interrupts */
  11476. /*
  11477. * Interrupt count:
  11478. * 1 general, "slow path" interrupt (includes the SDMA engines
  11479. * slow source, SDMACleanupDone)
  11480. * N interrupts - one per used SDMA engine
11481. * M interrupts - one per kernel receive context
* plus HFI1_NUM_VNIC_CTXT interrupts reserved for VNIC contexts
  11482. */
  11483. total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
  11484. entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
  11485. if (!entries) {
  11486. ret = -ENOMEM;
  11487. goto fail;
  11488. }
  11489. /* 1-1 MSI-X entry assignment */
  11490. for (i = 0; i < total; i++)
  11491. entries[i].msix.entry = i;
  11492. /* ask for MSI-X interrupts */
  11493. request = total;
  11494. request_msix(dd, &request, entries);
  11495. if (request == 0) {
  11496. /* using INTx */
  11497. /* dd->num_msix_entries already zero */
  11498. kfree(entries);
  11499. single_interrupt = 1;
  11500. dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
  11501. } else {
  11502. /* using MSI-X */
  11503. dd->num_msix_entries = request;
  11504. dd->msix_entries = entries;
  11505. if (request != total) {
  11506. /* using MSI-X, with reduced interrupts */
  11507. dd_dev_err(
  11508. dd,
  11509. "cannot handle reduced interrupt case, want %u, got %u\n",
  11510. total, request);
  11511. ret = -EINVAL;
  11512. goto fail;
  11513. }
  11514. dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
  11515. }
  11516. /* mask all interrupts */
  11517. set_intr_state(dd, 0);
  11518. /* clear all pending interrupts */
  11519. clear_all_interrupts(dd);
  11520. /* reset general handler mask, chip MSI-X mappings */
  11521. reset_interrupts(dd);
  11522. if (single_interrupt)
  11523. ret = request_intx_irq(dd);
  11524. else
  11525. ret = request_msix_irqs(dd);
  11526. if (ret)
  11527. goto fail;
  11528. return 0;
  11529. fail:
  11530. clean_up_interrupts(dd);
  11531. return ret;
  11532. }
  11533. /*
  11534. * Set up context values in dd. Sets:
  11535. *
  11536. * num_rcv_contexts - number of contexts being used
  11537. * n_krcv_queues - number of kernel contexts
  11538. * first_dyn_alloc_ctxt - first dynamically allocated context
  11539. * in array of contexts
  11540. * freectxts - number of free user contexts
  11541. * num_send_contexts - number of PIO send contexts being used
  11542. */
  11543. static int set_up_context_variables(struct hfi1_devdata *dd)
  11544. {
  11545. unsigned long num_kernel_contexts;
  11546. int total_contexts;
  11547. int ret;
  11548. unsigned ngroups;
  11549. int qos_rmt_count;
  11550. int user_rmt_reduced;
  11551. /*
  11552. * Kernel receive contexts:
  11553. * - Context 0 - control context (VL15/multicast/error)
  11554. * - Context 1 - first kernel context
  11555. * - Context 2 - second kernel context
  11556. * ...
  11557. */
  11558. if (n_krcvqs)
  11559. /*
  11560. * n_krcvqs is the sum of module parameter kernel receive
  11561. * contexts, krcvqs[]. It does not include the control
  11562. * context, so add that.
  11563. */
  11564. num_kernel_contexts = n_krcvqs + 1;
  11565. else
  11566. num_kernel_contexts = DEFAULT_KRCVQS + 1;
  11567. /*
  11568. * Every kernel receive context needs an ACK send context.
11569. * One send context is allocated for each VL{0-7} and VL15.
  11570. */
  11571. if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
  11572. dd_dev_err(dd,
  11573. "Reducing # kernel rcv contexts to: %d, from %lu\n",
  11574. (int)(dd->chip_send_contexts - num_vls - 1),
  11575. num_kernel_contexts);
  11576. num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
  11577. }
  11578. /*
  11579. * User contexts:
  11580. * - default to 1 user context per real (non-HT) CPU core if
  11581. * num_user_contexts is negative
  11582. */
  11583. if (num_user_contexts < 0)
  11584. num_user_contexts =
  11585. cpumask_weight(&node_affinity.real_cpu_mask);
  11586. total_contexts = num_kernel_contexts + num_user_contexts;
  11587. /*
  11588. * Adjust the counts given a global max.
  11589. */
  11590. if (total_contexts > dd->chip_rcv_contexts) {
  11591. dd_dev_err(dd,
  11592. "Reducing # user receive contexts to: %d, from %d\n",
  11593. (int)(dd->chip_rcv_contexts - num_kernel_contexts),
  11594. (int)num_user_contexts);
  11595. num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
  11596. /* recalculate */
  11597. total_contexts = num_kernel_contexts + num_user_contexts;
  11598. }
  11599. /* each user context requires an entry in the RMT */
  11600. qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
  11601. if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
  11602. user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
  11603. dd_dev_err(dd,
  11604. "RMT size is reducing the number of user receive contexts from %d to %d\n",
  11605. (int)num_user_contexts,
  11606. user_rmt_reduced);
  11607. /* recalculate */
  11608. num_user_contexts = user_rmt_reduced;
  11609. total_contexts = num_kernel_contexts + num_user_contexts;
  11610. }
  11611. /* Accommodate VNIC contexts */
  11612. if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
  11613. total_contexts += HFI1_NUM_VNIC_CTXT;
  11614. /* the first N are kernel contexts, the rest are user/vnic contexts */
  11615. dd->num_rcv_contexts = total_contexts;
  11616. dd->n_krcv_queues = num_kernel_contexts;
  11617. dd->first_dyn_alloc_ctxt = num_kernel_contexts;
  11618. dd->num_user_contexts = num_user_contexts;
  11619. dd->freectxts = num_user_contexts;
  11620. dd_dev_info(dd,
  11621. "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
  11622. (int)dd->chip_rcv_contexts,
  11623. (int)dd->num_rcv_contexts,
  11624. (int)dd->n_krcv_queues,
  11625. (int)dd->num_rcv_contexts - dd->n_krcv_queues);
  11626. /*
  11627. * Receive array allocation:
  11628. * All RcvArray entries are divided into groups of 8. This
  11629. * is required by the hardware and will speed up writes to
  11630. * consecutive entries by using write-combining of the entire
  11631. * cacheline.
  11632. *
11633. * The groups are divided evenly among all contexts;
11634. * any leftover groups are given to the first N user
11635. * contexts.
  11636. */
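/*
 * Purely illustrative numbers: with 8192 RcvArray entries, a group
 * size of 8 and 32 receive contexts, ngroups = 8192 / 8 = 1024, each
 * context gets 1024 / 32 = 32 groups, and any remainder would go to
 * the first user contexts.
 */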
  11637. dd->rcv_entries.group_size = RCV_INCREMENT;
  11638. ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
  11639. dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
  11640. dd->rcv_entries.nctxt_extra = ngroups -
  11641. (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
  11642. dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
  11643. dd->rcv_entries.ngroups,
  11644. dd->rcv_entries.nctxt_extra);
  11645. if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
  11646. MAX_EAGER_ENTRIES * 2) {
  11647. dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
  11648. dd->rcv_entries.group_size;
  11649. dd_dev_info(dd,
  11650. "RcvArray group count too high, change to %u\n",
  11651. dd->rcv_entries.ngroups);
  11652. dd->rcv_entries.nctxt_extra = 0;
  11653. }
  11654. /*
  11655. * PIO send contexts
  11656. */
  11657. ret = init_sc_pools_and_sizes(dd);
  11658. if (ret >= 0) { /* success */
  11659. dd->num_send_contexts = ret;
  11660. dd_dev_info(
  11661. dd,
  11662. "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
  11663. dd->chip_send_contexts,
  11664. dd->num_send_contexts,
  11665. dd->sc_sizes[SC_KERNEL].count,
  11666. dd->sc_sizes[SC_ACK].count,
  11667. dd->sc_sizes[SC_USER].count,
  11668. dd->sc_sizes[SC_VL15].count);
  11669. ret = 0; /* success */
  11670. }
  11671. return ret;
  11672. }
  11673. /*
  11674. * Set the device/port partition key table. The MAD code
  11675. * will ensure that, at least, the partial management
  11676. * partition key is present in the table.
  11677. */
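/*
 * Packing sketch, assuming RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT is
 * 16: each RCV_PARTITION_KEY register holds four 16-bit pkeys, so
 * pkey[6], for example, lands in the second register (byte offset 8)
 * at bits 47:32.
 */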
  11678. static void set_partition_keys(struct hfi1_pportdata *ppd)
  11679. {
  11680. struct hfi1_devdata *dd = ppd->dd;
  11681. u64 reg = 0;
  11682. int i;
  11683. dd_dev_info(dd, "Setting partition keys\n");
  11684. for (i = 0; i < hfi1_get_npkeys(dd); i++) {
  11685. reg |= (ppd->pkeys[i] &
  11686. RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
  11687. ((i % 4) *
  11688. RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
  11689. /* Each register holds 4 PKey values. */
  11690. if ((i % 4) == 3) {
  11691. write_csr(dd, RCV_PARTITION_KEY +
  11692. ((i - 3) * 2), reg);
  11693. reg = 0;
  11694. }
  11695. }
  11696. /* Always enable HW pkeys check when pkeys table is set */
  11697. add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
  11698. }
  11699. /*
  11700. * These CSRs and memories are uninitialized on reset and must be
  11701. * written before reading to set the ECC/parity bits.
  11702. *
11703. * NOTE: All user context CSRs that are not mmapped write-only
  11704. * (e.g. the TID flows) must be initialized even if the driver never
  11705. * reads them.
  11706. */
  11707. static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
  11708. {
  11709. int i, j;
  11710. /* CceIntMap */
  11711. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11712. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11713. /* SendCtxtCreditReturnAddr */
  11714. for (i = 0; i < dd->chip_send_contexts; i++)
  11715. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
  11716. /* PIO Send buffers */
  11717. /* SDMA Send buffers */
  11718. /*
  11719. * These are not normally read, and (presently) have no method
  11720. * to be read, so are not pre-initialized
  11721. */
  11722. /* RcvHdrAddr */
  11723. /* RcvHdrTailAddr */
  11724. /* RcvTidFlowTable */
  11725. for (i = 0; i < dd->chip_rcv_contexts; i++) {
  11726. write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
  11727. write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
  11728. for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
  11729. write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
  11730. }
  11731. /* RcvArray */
  11732. for (i = 0; i < dd->chip_rcv_array_count; i++)
  11733. write_csr(dd, RCV_ARRAY + (8 * i),
  11734. RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
  11735. /* RcvQPMapTable */
  11736. for (i = 0; i < 32; i++)
  11737. write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
  11738. }
  11739. /*
  11740. * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
  11741. */
  11742. static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
  11743. u64 ctrl_bits)
  11744. {
  11745. unsigned long timeout;
  11746. u64 reg;
  11747. /* is the condition present? */
  11748. reg = read_csr(dd, CCE_STATUS);
  11749. if ((reg & status_bits) == 0)
  11750. return;
  11751. /* clear the condition */
  11752. write_csr(dd, CCE_CTRL, ctrl_bits);
  11753. /* wait for the condition to clear */
  11754. timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
  11755. while (1) {
  11756. reg = read_csr(dd, CCE_STATUS);
  11757. if ((reg & status_bits) == 0)
  11758. return;
  11759. if (time_after(jiffies, timeout)) {
  11760. dd_dev_err(dd,
  11761. "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
  11762. status_bits, reg & status_bits);
  11763. return;
  11764. }
  11765. udelay(1);
  11766. }
  11767. }
  11768. /* set CCE CSRs to chip reset defaults */
  11769. static void reset_cce_csrs(struct hfi1_devdata *dd)
  11770. {
  11771. int i;
  11772. /* CCE_REVISION read-only */
  11773. /* CCE_REVISION2 read-only */
  11774. /* CCE_CTRL - bits clear automatically */
  11775. /* CCE_STATUS read-only, use CceCtrl to clear */
  11776. clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
  11777. clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
  11778. clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
  11779. for (i = 0; i < CCE_NUM_SCRATCH; i++)
  11780. write_csr(dd, CCE_SCRATCH + (8 * i), 0);
  11781. /* CCE_ERR_STATUS read-only */
  11782. write_csr(dd, CCE_ERR_MASK, 0);
  11783. write_csr(dd, CCE_ERR_CLEAR, ~0ull);
  11784. /* CCE_ERR_FORCE leave alone */
  11785. for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
  11786. write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
  11787. write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
  11788. /* CCE_PCIE_CTRL leave alone */
  11789. for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
  11790. write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
  11791. write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
  11792. CCE_MSIX_TABLE_UPPER_RESETCSR);
  11793. }
  11794. for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
  11795. /* CCE_MSIX_PBA read-only */
  11796. write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
  11797. write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
  11798. }
  11799. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11800. write_csr(dd, CCE_INT_MAP, 0);
  11801. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  11802. /* CCE_INT_STATUS read-only */
  11803. write_csr(dd, CCE_INT_MASK + (8 * i), 0);
  11804. write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
  11805. /* CCE_INT_FORCE leave alone */
  11806. /* CCE_INT_BLOCKED read-only */
  11807. }
  11808. for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
  11809. write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
  11810. }
  11811. /* set MISC CSRs to chip reset defaults */
  11812. static void reset_misc_csrs(struct hfi1_devdata *dd)
  11813. {
  11814. int i;
  11815. for (i = 0; i < 32; i++) {
  11816. write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
  11817. write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
  11818. write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
  11819. }
  11820. /*
  11821. * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
11822. * only be written in 128-byte chunks
  11823. */
  11824. /* init RSA engine to clear lingering errors */
  11825. write_csr(dd, MISC_CFG_RSA_CMD, 1);
  11826. write_csr(dd, MISC_CFG_RSA_MU, 0);
  11827. write_csr(dd, MISC_CFG_FW_CTRL, 0);
  11828. /* MISC_STS_8051_DIGEST read-only */
  11829. /* MISC_STS_SBM_DIGEST read-only */
  11830. /* MISC_STS_PCIE_DIGEST read-only */
  11831. /* MISC_STS_FAB_DIGEST read-only */
  11832. /* MISC_ERR_STATUS read-only */
  11833. write_csr(dd, MISC_ERR_MASK, 0);
  11834. write_csr(dd, MISC_ERR_CLEAR, ~0ull);
  11835. /* MISC_ERR_FORCE leave alone */
  11836. }
  11837. /* set TXE CSRs to chip reset defaults */
  11838. static void reset_txe_csrs(struct hfi1_devdata *dd)
  11839. {
  11840. int i;
  11841. /*
  11842. * TXE Kernel CSRs
  11843. */
  11844. write_csr(dd, SEND_CTRL, 0);
  11845. __cm_reset(dd, 0); /* reset CM internal state */
  11846. /* SEND_CONTEXTS read-only */
  11847. /* SEND_DMA_ENGINES read-only */
  11848. /* SEND_PIO_MEM_SIZE read-only */
  11849. /* SEND_DMA_MEM_SIZE read-only */
  11850. write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
  11851. pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
  11852. /* SEND_PIO_ERR_STATUS read-only */
  11853. write_csr(dd, SEND_PIO_ERR_MASK, 0);
  11854. write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
  11855. /* SEND_PIO_ERR_FORCE leave alone */
  11856. /* SEND_DMA_ERR_STATUS read-only */
  11857. write_csr(dd, SEND_DMA_ERR_MASK, 0);
  11858. write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
  11859. /* SEND_DMA_ERR_FORCE leave alone */
  11860. /* SEND_EGRESS_ERR_STATUS read-only */
  11861. write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
  11862. write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
  11863. /* SEND_EGRESS_ERR_FORCE leave alone */
  11864. write_csr(dd, SEND_BTH_QP, 0);
  11865. write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
  11866. write_csr(dd, SEND_SC2VLT0, 0);
  11867. write_csr(dd, SEND_SC2VLT1, 0);
  11868. write_csr(dd, SEND_SC2VLT2, 0);
  11869. write_csr(dd, SEND_SC2VLT3, 0);
  11870. write_csr(dd, SEND_LEN_CHECK0, 0);
  11871. write_csr(dd, SEND_LEN_CHECK1, 0);
  11872. /* SEND_ERR_STATUS read-only */
  11873. write_csr(dd, SEND_ERR_MASK, 0);
  11874. write_csr(dd, SEND_ERR_CLEAR, ~0ull);
  11875. /* SEND_ERR_FORCE read-only */
  11876. for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
  11877. write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
  11878. for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
  11879. write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
  11880. for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
  11881. write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
  11882. for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
  11883. write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
  11884. for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
  11885. write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
  11886. write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
  11887. write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
  11888. /* SEND_CM_CREDIT_USED_STATUS read-only */
  11889. write_csr(dd, SEND_CM_TIMER_CTRL, 0);
  11890. write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
  11891. write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
  11892. write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
  11893. write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
  11894. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  11895. write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
  11896. write_csr(dd, SEND_CM_CREDIT_VL15, 0);
  11897. /* SEND_CM_CREDIT_USED_VL read-only */
  11898. /* SEND_CM_CREDIT_USED_VL15 read-only */
  11899. /* SEND_EGRESS_CTXT_STATUS read-only */
  11900. /* SEND_EGRESS_SEND_DMA_STATUS read-only */
  11901. write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
  11902. /* SEND_EGRESS_ERR_INFO read-only */
  11903. /* SEND_EGRESS_ERR_SOURCE read-only */
  11904. /*
  11905. * TXE Per-Context CSRs
  11906. */
  11907. for (i = 0; i < dd->chip_send_contexts; i++) {
  11908. write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
  11909. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
  11910. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
  11911. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
  11912. write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
  11913. write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
  11914. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
  11915. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
  11916. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
  11917. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
  11918. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
  11919. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
  11920. }
  11921. /*
  11922. * TXE Per-SDMA CSRs
  11923. */
  11924. for (i = 0; i < dd->chip_sdma_engines; i++) {
  11925. write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
  11926. /* SEND_DMA_STATUS read-only */
  11927. write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
  11928. write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
  11929. write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
  11930. /* SEND_DMA_HEAD read-only */
  11931. write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
  11932. write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
  11933. /* SEND_DMA_IDLE_CNT read-only */
  11934. write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
  11935. write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
  11936. /* SEND_DMA_DESC_FETCHED_CNT read-only */
  11937. /* SEND_DMA_ENG_ERR_STATUS read-only */
  11938. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
  11939. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
  11940. /* SEND_DMA_ENG_ERR_FORCE leave alone */
  11941. write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
  11942. write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
  11943. write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
  11944. write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
  11945. write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
  11946. write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
  11947. write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
  11948. }
  11949. }
  11950. /*
  11951. * Expect on entry:
  11952. * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
  11953. */
  11954. static void init_rbufs(struct hfi1_devdata *dd)
  11955. {
  11956. u64 reg;
  11957. int count;
  11958. /*
  11959. * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
  11960. * clear.
  11961. */
  11962. count = 0;
  11963. while (1) {
  11964. reg = read_csr(dd, RCV_STATUS);
  11965. if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
  11966. | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
  11967. break;
  11968. /*
  11969. * Give up after 1ms - maximum wait time.
  11970. *
  11971. * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
  11972. * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
  11973. * 136 KB / (66% * 250MB/s) = 844us
  11974. */
  11975. if (count++ > 500) {
  11976. dd_dev_err(dd,
  11977. "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
  11978. __func__, reg);
  11979. break;
  11980. }
  11981. udelay(2); /* do not busy-wait the CSR */
  11982. }
  11983. /* start the init - expect RcvCtrl to be 0 */
  11984. write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
  11985. /*
11986. * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
  11987. * period after the write before RcvStatus.RxRbufInitDone is valid.
  11988. * The delay in the first run through the loop below is sufficient and
11989. * required before the first read of RcvStatus.RxRbufInitDone.
  11990. */
  11991. read_csr(dd, RCV_CTRL);
  11992. /* wait for the init to finish */
  11993. count = 0;
  11994. while (1) {
  11995. /* delay is required first time through - see above */
  11996. udelay(2); /* do not busy-wait the CSR */
  11997. reg = read_csr(dd, RCV_STATUS);
  11998. if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
  11999. break;
  12000. /* give up after 100us - slowest possible at 33MHz is 73us */
  12001. if (count++ > 50) {
  12002. dd_dev_err(dd,
  12003. "%s: RcvStatus.RxRbufInit not set, continuing\n",
  12004. __func__);
  12005. break;
  12006. }
  12007. }
  12008. }
  12009. /* set RXE CSRs to chip reset defaults */
  12010. static void reset_rxe_csrs(struct hfi1_devdata *dd)
  12011. {
  12012. int i, j;
  12013. /*
  12014. * RXE Kernel CSRs
  12015. */
  12016. write_csr(dd, RCV_CTRL, 0);
  12017. init_rbufs(dd);
  12018. /* RCV_STATUS read-only */
  12019. /* RCV_CONTEXTS read-only */
  12020. /* RCV_ARRAY_CNT read-only */
  12021. /* RCV_BUF_SIZE read-only */
  12022. write_csr(dd, RCV_BTH_QP, 0);
  12023. write_csr(dd, RCV_MULTICAST, 0);
  12024. write_csr(dd, RCV_BYPASS, 0);
  12025. write_csr(dd, RCV_VL15, 0);
  12026. /* this is a clear-down */
  12027. write_csr(dd, RCV_ERR_INFO,
  12028. RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
  12029. /* RCV_ERR_STATUS read-only */
  12030. write_csr(dd, RCV_ERR_MASK, 0);
  12031. write_csr(dd, RCV_ERR_CLEAR, ~0ull);
  12032. /* RCV_ERR_FORCE leave alone */
  12033. for (i = 0; i < 32; i++)
  12034. write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
  12035. for (i = 0; i < 4; i++)
  12036. write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
  12037. for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
  12038. write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
  12039. for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
  12040. write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
  12041. for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
  12042. clear_rsm_rule(dd, i);
  12043. for (i = 0; i < 32; i++)
  12044. write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
  12045. /*
  12046. * RXE Kernel and User Per-Context CSRs
  12047. */
  12048. for (i = 0; i < dd->chip_rcv_contexts; i++) {
  12049. /* kernel */
  12050. write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
  12051. /* RCV_CTXT_STATUS read-only */
  12052. write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
  12053. write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
  12054. write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
  12055. write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
  12056. write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
  12057. write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
  12058. write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
  12059. write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
  12060. write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
  12061. write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
  12062. /* user */
  12063. /* RCV_HDR_TAIL read-only */
  12064. write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
  12065. /* RCV_EGR_INDEX_TAIL read-only */
  12066. write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
  12067. /* RCV_EGR_OFFSET_TAIL read-only */
  12068. for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
  12069. write_uctxt_csr(dd, i,
  12070. RCV_TID_FLOW_TABLE + (8 * j), 0);
  12071. }
  12072. }
  12073. }
  12074. /*
  12075. * Set sc2vl tables.
  12076. *
  12077. * They power on to zeros, so to avoid send context errors
  12078. * they need to be set:
  12079. *
  12080. * SC 0-7 -> VL 0-7 (respectively)
  12081. * SC 15 -> VL 15
  12082. * otherwise
  12083. * -> VL 0
  12084. */
  12085. static void init_sc2vl_tables(struct hfi1_devdata *dd)
  12086. {
  12087. int i;
  12088. /* init per architecture spec, constrained by hardware capability */
  12089. /* HFI maps sent packets */
  12090. write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
  12091. 0,
  12092. 0, 0, 1, 1,
  12093. 2, 2, 3, 3,
  12094. 4, 4, 5, 5,
  12095. 6, 6, 7, 7));
  12096. write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
  12097. 1,
  12098. 8, 0, 9, 0,
  12099. 10, 0, 11, 0,
  12100. 12, 0, 13, 0,
  12101. 14, 0, 15, 15));
  12102. write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
  12103. 2,
  12104. 16, 0, 17, 0,
  12105. 18, 0, 19, 0,
  12106. 20, 0, 21, 0,
  12107. 22, 0, 23, 0));
  12108. write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
  12109. 3,
  12110. 24, 0, 25, 0,
  12111. 26, 0, 27, 0,
  12112. 28, 0, 29, 0,
  12113. 30, 0, 31, 0));
  12114. /* DC maps received packets */
  12115. write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
  12116. 15_0,
  12117. 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
  12118. 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
  12119. write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
  12120. 31_16,
  12121. 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
  12122. 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
  12123. /* initialize the cached sc2vl values consistently with h/w */
  12124. for (i = 0; i < 32; i++) {
  12125. if (i < 8 || i == 15)
  12126. *((u8 *)(dd->sc2vl) + i) = (u8)i;
  12127. else
  12128. *((u8 *)(dd->sc2vl) + i) = 0;
  12129. }
  12130. }
  12131. /*
  12132. * Read chip sizes and then reset parts to sane, disabled, values. We cannot
  12133. * depend on the chip going through a power-on reset - a driver may be loaded
  12134. * and unloaded many times.
  12135. *
  12136. * Do not write any CSR values to the chip in this routine - there may be
  12137. * a reset following the (possible) FLR in this routine.
  12138. *
  12139. */
  12140. static void init_chip(struct hfi1_devdata *dd)
  12141. {
  12142. int i;
  12143. /*
  12144. * Put the HFI CSRs in a known state.
  12145. * Combine this with a DC reset.
  12146. *
  12147. * Stop the device from doing anything while we do a
  12148. * reset. We know there are no other active users of
12149. * the device since we are now in charge. Turn off
12150. * all outbound and inbound traffic and make sure
  12151. * the device does not generate any interrupts.
  12152. */
  12153. /* disable send contexts and SDMA engines */
  12154. write_csr(dd, SEND_CTRL, 0);
  12155. for (i = 0; i < dd->chip_send_contexts; i++)
  12156. write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
  12157. for (i = 0; i < dd->chip_sdma_engines; i++)
  12158. write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
  12159. /* disable port (turn off RXE inbound traffic) and contexts */
  12160. write_csr(dd, RCV_CTRL, 0);
  12161. for (i = 0; i < dd->chip_rcv_contexts; i++)
  12162. write_csr(dd, RCV_CTXT_CTRL, 0);
  12163. /* mask all interrupt sources */
  12164. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  12165. write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
  12166. /*
  12167. * DC Reset: do a full DC reset before the register clear.
  12168. * A recommended length of time to hold is one CSR read,
  12169. * so reread the CceDcCtrl. Then, hold the DC in reset
  12170. * across the clear.
  12171. */
  12172. write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
  12173. (void)read_csr(dd, CCE_DC_CTRL);
  12174. if (use_flr) {
  12175. /*
  12176. * A FLR will reset the SPC core and part of the PCIe.
  12177. * The parts that need to be restored have already been
  12178. * saved.
  12179. */
  12180. dd_dev_info(dd, "Resetting CSRs with FLR\n");
  12181. /* do the FLR, the DC reset will remain */
  12182. pcie_flr(dd->pcidev);
  12183. /* restore command and BARs */
  12184. restore_pci_variables(dd);
  12185. if (is_ax(dd)) {
  12186. dd_dev_info(dd, "Resetting CSRs with FLR\n");
  12187. pcie_flr(dd->pcidev);
  12188. restore_pci_variables(dd);
  12189. }
  12190. } else {
  12191. dd_dev_info(dd, "Resetting CSRs with writes\n");
  12192. reset_cce_csrs(dd);
  12193. reset_txe_csrs(dd);
  12194. reset_rxe_csrs(dd);
  12195. reset_misc_csrs(dd);
  12196. }
  12197. /* clear the DC reset */
  12198. write_csr(dd, CCE_DC_CTRL, 0);
  12199. /* Set the LED off */
  12200. setextled(dd, 0);
  12201. /*
  12202. * Clear the QSFP reset.
  12203. * An FLR enforces a 0 on all out pins. The driver does not touch
12204. * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and holds
12205. * anything plugged in constantly in reset, if it pays attention
  12206. * to RESET_N.
  12207. * Prime examples of this are optical cables. Set all pins high.
  12208. * I2CCLK and I2CDAT will change per direction, and INT_N and
  12209. * MODPRS_N are input only and their value is ignored.
  12210. */
  12211. write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
  12212. write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
  12213. init_chip_resources(dd);
  12214. }
  12215. static void init_early_variables(struct hfi1_devdata *dd)
  12216. {
  12217. int i;
  12218. /* assign link credit variables */
  12219. dd->vau = CM_VAU;
  12220. dd->link_credits = CM_GLOBAL_CREDITS;
  12221. if (is_ax(dd))
  12222. dd->link_credits--;
  12223. dd->vcu = cu_to_vcu(hfi1_cu);
  12224. /* enough room for 8 MAD packets plus header - 17K */
  12225. dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
  12226. if (dd->vl15_init > dd->link_credits)
  12227. dd->vl15_init = dd->link_credits;
  12228. write_uninitialized_csrs_and_memories(dd);
  12229. if (HFI1_CAP_IS_KSET(PKEY_CHECK))
  12230. for (i = 0; i < dd->num_pports; i++) {
  12231. struct hfi1_pportdata *ppd = &dd->pport[i];
  12232. set_partition_keys(ppd);
  12233. }
  12234. init_sc2vl_tables(dd);
  12235. }
  12236. static void init_kdeth_qp(struct hfi1_devdata *dd)
  12237. {
  12238. /* user changed the KDETH_QP */
  12239. if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
  12240. /* out of range or illegal value */
  12241. dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
  12242. kdeth_qp = 0;
  12243. }
  12244. if (kdeth_qp == 0) /* not set, or failed range check */
  12245. kdeth_qp = DEFAULT_KDETH_QP;
  12246. write_csr(dd, SEND_BTH_QP,
  12247. (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
  12248. SEND_BTH_QP_KDETH_QP_SHIFT);
  12249. write_csr(dd, RCV_BTH_QP,
  12250. (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
  12251. RCV_BTH_QP_KDETH_QP_SHIFT);
  12252. }
  12253. /**
  12254. * init_qpmap_table
  12255. * @dd - device data
  12256. * @first_ctxt - first context
12257. * @last_ctxt - last context
  12258. *
12259. * This routine sets the qpn mapping table that
  12260. * is indexed by qpn[8:1].
  12261. *
  12262. * The routine will round robin the 256 settings
  12263. * from first_ctxt to last_ctxt.
  12264. *
  12265. * The first/last looks ahead to having specialized
  12266. * receive contexts for mgmt and bypass. Normal
12267. * verbs traffic will be assumed to be on a range
  12268. * of receive contexts.
  12269. */
  12270. static void init_qpmap_table(struct hfi1_devdata *dd,
  12271. u32 first_ctxt,
  12272. u32 last_ctxt)
  12273. {
  12274. u64 reg = 0;
  12275. u64 regno = RCV_QP_MAP_TABLE;
  12276. int i;
  12277. u64 ctxt = first_ctxt;
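/*
 * Fill sketch: the 256 one-byte entries are packed eight per register,
 * so entry i lives in RCV_QP_MAP_TABLE + 8 * (i / 8) at byte i % 8.
 * With first_ctxt = 1 and last_ctxt = 8, for example, entries 0..7 get
 * contexts 1..8, entry 8 wraps back to context 1, and so on.
 */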
  12278. for (i = 0; i < 256; i++) {
  12279. reg |= ctxt << (8 * (i % 8));
  12280. ctxt++;
  12281. if (ctxt > last_ctxt)
  12282. ctxt = first_ctxt;
  12283. if (i % 8 == 7) {
  12284. write_csr(dd, regno, reg);
  12285. reg = 0;
  12286. regno += 8;
  12287. }
  12288. }
  12289. add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
  12290. | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
  12291. }
  12292. struct rsm_map_table {
  12293. u64 map[NUM_MAP_REGS];
  12294. unsigned int used;
  12295. };
  12296. struct rsm_rule_data {
  12297. u8 offset;
  12298. u8 pkt_type;
  12299. u32 field1_off;
  12300. u32 field2_off;
  12301. u32 index1_off;
  12302. u32 index1_width;
  12303. u32 index2_off;
  12304. u32 index2_width;
  12305. u32 mask1;
  12306. u32 value1;
  12307. u32 mask2;
  12308. u32 value2;
  12309. };
  12310. /*
  12311. * Return an initialized RMT map table for users to fill in. OK if it
  12312. * returns NULL, indicating no table.
  12313. */
  12314. static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
  12315. {
  12316. struct rsm_map_table *rmt;
  12317. u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
  12318. rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
  12319. if (rmt) {
  12320. memset(rmt->map, rxcontext, sizeof(rmt->map));
  12321. rmt->used = 0;
  12322. }
  12323. return rmt;
  12324. }
  12325. /*
  12326. * Write the final RMT map table to the chip and free the table. OK if
  12327. * table is NULL.
  12328. */
  12329. static void complete_rsm_map_table(struct hfi1_devdata *dd,
  12330. struct rsm_map_table *rmt)
  12331. {
  12332. int i;
  12333. if (rmt) {
  12334. /* write table to chip */
  12335. for (i = 0; i < NUM_MAP_REGS; i++)
  12336. write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
  12337. /* enable RSM */
  12338. add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
  12339. }
  12340. }
  12341. /*
  12342. * Add a receive side mapping rule.
  12343. */
  12344. static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
  12345. struct rsm_rule_data *rrd)
  12346. {
  12347. write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
  12348. (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
  12349. 1ull << rule_index | /* enable bit */
  12350. (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
  12351. write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
  12352. (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
  12353. (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
  12354. (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
  12355. (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
  12356. (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
  12357. (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
  12358. write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
  12359. (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
  12360. (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
  12361. (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
  12362. (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
  12363. }
  12364. /*
  12365. * Clear a receive side mapping rule.
  12366. */
  12367. static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
  12368. {
  12369. write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
  12370. write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
  12371. write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
  12372. }
  12373. /* return the number of RSM map table entries that will be used for QOS */
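/*
 * Worked example with illustrative module parameters, assuming QOS is
 * active: with krcvqs = {5, 5, 5, 5}, krcvqsset = 4 and num_vls = 8,
 * max_by_vl = 5, so m = ilog2(roundup_pow_of_two(5)) = 3 and
 * n = ilog2(8) = 3; m + n = 6 <= 7, so 1 << 6 = 64 map entries are used.
 */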
  12374. static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
  12375. unsigned int *np)
  12376. {
  12377. int i;
  12378. unsigned int m, n;
  12379. u8 max_by_vl = 0;
  12380. /* is QOS active at all? */
  12381. if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
  12382. num_vls == 1 ||
  12383. krcvqsset <= 1)
  12384. goto no_qos;
  12385. /* determine bits for qpn */
  12386. for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
  12387. if (krcvqs[i] > max_by_vl)
  12388. max_by_vl = krcvqs[i];
  12389. if (max_by_vl > 32)
  12390. goto no_qos;
  12391. m = ilog2(__roundup_pow_of_two(max_by_vl));
  12392. /* determine bits for vl */
  12393. n = ilog2(__roundup_pow_of_two(num_vls));
  12394. /* reject if too much is used */
  12395. if ((m + n) > 7)
  12396. goto no_qos;
  12397. if (mp)
  12398. *mp = m;
  12399. if (np)
  12400. *np = n;
  12401. return 1 << (m + n);
  12402. no_qos:
  12403. if (mp)
  12404. *mp = 0;
  12405. if (np)
  12406. *np = 0;
  12407. return 0;
  12408. }
  12409. /**
  12410. * init_qos - init RX qos
  12411. * @dd - device data
  12412. * @rmt - RSM map table
  12413. *
  12414. * This routine initializes Rule 0 and the RSM map table to implement
  12415. * quality of service (qos).
  12416. *
  12417. * If all of the limit tests succeed, qos is applied based on the array
  12418. * interpretation of krcvqs where entry 0 is VL0.
  12419. *
  12420. * The number of vl bits (n) and the number of qpn bits (m) are computed to
  12421. * feed both the RSM map table and the single rule.
  12422. */
  12423. static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
  12424. {
  12425. struct rsm_rule_data rrd;
  12426. unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
  12427. unsigned int rmt_entries;
  12428. u64 reg;
  12429. if (!rmt)
  12430. goto bail;
  12431. rmt_entries = qos_rmt_entries(dd, &m, &n);
  12432. if (rmt_entries == 0)
  12433. goto bail;
  12434. qpns_per_vl = 1 << m;
  12435. /* enough room in the map table? */
  12436. rmt_entries = 1 << (m + n);
  12437. if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
  12438. goto bail;
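/*
 * Map fill sketch: for VL i and QPN low bits q, the table entry at
 * rmt->used + ((q << n) ^ i) receives one of the krcvqs[i] contexts
 * assigned to that VL, handed out round-robin. Rule 0 then selects the
 * same n SC bits and m + n QPN bits from the packet so that lookups
 * index the matching entry.
 */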
12439. /* add qos entries to the RSM map table */
  12440. for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
  12441. unsigned tctxt;
  12442. for (qpn = 0, tctxt = ctxt;
  12443. krcvqs[i] && qpn < qpns_per_vl; qpn++) {
  12444. unsigned idx, regoff, regidx;
  12445. /* generate the index the hardware will produce */
  12446. idx = rmt->used + ((qpn << n) ^ i);
  12447. regoff = (idx % 8) * 8;
  12448. regidx = idx / 8;
  12449. /* replace default with context number */
  12450. reg = rmt->map[regidx];
  12451. reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
  12452. << regoff);
  12453. reg |= (u64)(tctxt++) << regoff;
  12454. rmt->map[regidx] = reg;
  12455. if (tctxt == ctxt + krcvqs[i])
  12456. tctxt = ctxt;
  12457. }
  12458. ctxt += krcvqs[i];
  12459. }
  12460. rrd.offset = rmt->used;
  12461. rrd.pkt_type = 2;
  12462. rrd.field1_off = LRH_BTH_MATCH_OFFSET;
  12463. rrd.field2_off = LRH_SC_MATCH_OFFSET;
  12464. rrd.index1_off = LRH_SC_SELECT_OFFSET;
  12465. rrd.index1_width = n;
  12466. rrd.index2_off = QPN_SELECT_OFFSET;
  12467. rrd.index2_width = m + n;
  12468. rrd.mask1 = LRH_BTH_MASK;
  12469. rrd.value1 = LRH_BTH_VALUE;
  12470. rrd.mask2 = LRH_SC_MASK;
  12471. rrd.value2 = LRH_SC_VALUE;
  12472. /* add rule 0 */
  12473. add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
  12474. /* mark RSM map entries as used */
  12475. rmt->used += rmt_entries;
  12476. /* map everything else to the mcast/err/vl15 context */
  12477. init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
  12478. dd->qos_shift = n + 1;
  12479. return;
  12480. bail:
  12481. dd->qos_shift = 1;
  12482. init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
  12483. }
  12484. static void init_user_fecn_handling(struct hfi1_devdata *dd,
  12485. struct rsm_map_table *rmt)
  12486. {
  12487. struct rsm_rule_data rrd;
  12488. u64 reg;
  12489. int i, idx, regoff, regidx;
  12490. u8 offset;
  12491. /* there needs to be enough room in the map table */
  12492. if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
  12493. dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
  12494. return;
  12495. }
  12496. /*
  12497. * RSM will extract the destination context as an index into the
  12498. * map table. The destination contexts are a sequential block
  12499. * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
  12500. * Map entries are accessed as offset + extracted value. Adjust
  12501. * the added offset so this sequence can be placed anywhere in
  12502. * the table - as long as the entries themselves do not wrap.
  12503. * There are only enough bits in offset for the table size, so
  12504. * start with that to allow for a "negative" offset.
12505. * start with that to allow for a "negative" offset.
*/
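/*
 * Offset arithmetic sketch, assuming NUM_MAP_ENTRIES is 256 so the u8
 * math wraps mod 256: with rmt->used = 80 and first_dyn_alloc_ctxt = 9,
 * offset = (256 + 80 - 9) & 0xff = 71, and an extracted context of 9
 * then indexes entry 71 + 9 = 80, the first entry claimed below.
 */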
  12506. offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
  12507. (int)dd->first_dyn_alloc_ctxt);
  12508. for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
  12509. i < dd->num_rcv_contexts; i++, idx++) {
  12510. /* replace with identity mapping */
  12511. regoff = (idx % 8) * 8;
  12512. regidx = idx / 8;
  12513. reg = rmt->map[regidx];
  12514. reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
  12515. reg |= (u64)i << regoff;
  12516. rmt->map[regidx] = reg;
  12517. }
  12518. /*
  12519. * For RSM intercept of Expected FECN packets:
  12520. * o packet type 0 - expected
  12521. * o match on F (bit 95), using select/match 1, and
  12522. * o match on SH (bit 133), using select/match 2.
  12523. *
  12524. * Use index 1 to extract the 8-bit receive context from DestQP
  12525. * (start at bit 64). Use that as the RSM map table index.
12526. * (start at bit 64). Use that as the RSM map table index.
*/
  12527. rrd.offset = offset;
  12528. rrd.pkt_type = 0;
  12529. rrd.field1_off = 95;
  12530. rrd.field2_off = 133;
  12531. rrd.index1_off = 64;
  12532. rrd.index1_width = 8;
  12533. rrd.index2_off = 0;
  12534. rrd.index2_width = 0;
  12535. rrd.mask1 = 1;
  12536. rrd.value1 = 1;
  12537. rrd.mask2 = 1;
  12538. rrd.value2 = 1;
  12539. /* add rule 1 */
  12540. add_rsm_rule(dd, RSM_INS_FECN, &rrd);
  12541. rmt->used += dd->num_user_contexts;
  12542. }

/* Initialize RSM for VNIC */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
	u8 i, j;
	u8 ctx_id = 0;
	u64 reg;
	u32 regoff;
	struct rsm_rule_data rrd;

	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
			   dd->vnic.rmt_start);
		return;
	}

	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
		dd->vnic.rmt_start,
		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);

	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
		/* Update map register with vnic context */
		j = (dd->vnic.rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
		/* Wrap up vnic ctx index */
		ctx_id %= dd->vnic.num_ctxt;
		/* Write back map register */
		if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"Vnic rsm map reg[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);
			write_csr(dd, regoff, reg);
			regoff += 8;
			if (i < (NUM_VNIC_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
		}
	}
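	/*
	 * Explanatory note (added): each 64-bit map register packs eight
	 * one-byte context entries, so entry (rmt_start + i) lives in byte
	 * (rmt_start + i) % 8 of the register at byte address
	 * RCV_RSM_MAP_TABLE + ((rmt_start + i) / 8) * 8.  The loop above
	 * writes a register back only once its last byte (j == 7) has been
	 * filled or the final VNIC entry has been placed.
	 */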
	/* Add rule for vnic */
	rrd.offset = dd->vnic.rmt_start;
	rrd.pkt_type = 4;
	/* Match 16B packets */
	rrd.field1_off = L2_TYPE_MATCH_OFFSET;
	rrd.mask1 = L2_TYPE_MASK;
	rrd.value1 = L2_16B_VALUE;
	/* Match ETH L4 packets */
	rrd.field2_off = L4_TYPE_MATCH_OFFSET;
	rrd.mask2 = L4_16B_TYPE_MASK;
	rrd.value2 = L4_16B_ETH_VALUE;
	/* Calc context from veswid and entropy */
	rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
	rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	rrd.index2_off = L2_16B_ENTROPY_OFFSET;
	rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);

	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);

	/* Enable RSM if not already enabled */
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}

void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
	clear_rsm_rule(dd, RSM_INS_VNIC);

	/* Disable RSM if used only by vnic */
	if (dd->vnic.rmt_start == 0)
		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}

static void init_rxe(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;

	/* enable all receive errors */
	write_csr(dd, RCV_ERR_MASK, ~0ull);

	rmt = alloc_rsm_map_table(dd);
	/* set up QOS, including the QPN map table */
	init_qos(dd, rmt);
	init_user_fecn_handling(dd, rmt);
	complete_rsm_map_table(dd, rmt);
	/* record number of used rsm map entries for vnic */
	dd->vnic.rmt_start = rmt->used;
	kfree(rmt);

	/*
	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
	 * Max_Payload_Size set to its minimum of 128.
	 *
	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */
}

static void init_other(struct hfi1_devdata *dd)
{
	/* enable all CCE errors */
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}

/*
 * Fill out the given AU table using the given CU.  A CU is defined in terms
 * of AUs.  The table is an encoding: given the index, how many AUs does that
 * represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
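/*
 * Illustrative example (added, hypothetical numbers): if vcu_to_cu() yields
 * cu == 4, the eight entries written below encode 0, 1, 8, 16, 32, 64, 128,
 * and 256 AUs for indices 0 through 7 - entries 0 and 1 are fixed, and the
 * remaining entries scale as powers of two times the CU.
 */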
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
			       u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
		  16ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
		  32ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
		  64ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}

static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}

void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}

static void init_txe(struct hfi1_devdata *dd)
{
	int i;

	/* enable all PIO, SDMA, general, and Egress errors */
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

	/* enable all per-context and per-SDMA engine errors */
	for (i = 0; i < dd->chip_send_contexts; i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < dd->chip_sdma_engines; i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

	/* set the local CU to AU mapping */
	assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set reasonable default for Credit Return Timer
	 * Don't set on Simulator - causes it to choke.
	 */
	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}

int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
{
	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
		 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
	/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
	/*
	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* Enable J_KEY check on receive context. */
	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
		 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
done:
	return ret;
}

int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
{
	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w, see
	 * set_ctxt_jkey().
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}
	/* Turn off the J_KEY on the receive side */
	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
done:
	return ret;
}

int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
{
	struct hfi1_ctxtdata *rcd;
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (ctxt < dd->num_rcv_contexts) {
		rcd = dd->rcd[ctxt];
	} else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
	return ret;
}

int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	u8 hw_ctxt;
	u64 reg;

	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	if (ctxt->ctxt >= dd->num_rcv_contexts)
		return -EINVAL;

	hw_ctxt = ctxt->sc->hw_context;
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}

/*
 * Start doing the clean up of the chip.  Our clean up happens in multiple
 * stages and this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	aspm_exit(dd);
	free_cntrs(dd);
	free_rcverr(dd);
	clean_up_interrupts(dd);
	finish_chip_resources(dd);
}

#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))

/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS.  This function finds the peer device and sets
 * up a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
	unsigned long flags;
	struct hfi1_devdata *tmp, *peer = NULL;
	struct hfi1_asic_data *asic_data;
	int ret = 0;

	/* pre-allocate the asic structure in case we are the first device */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	/* Find our peer device */
	list_for_each_entry(tmp, &hfi1_dev_list, list) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}

	if (peer) {
		/* use already allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	/* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}

/*
 * Set dd->boardname.  Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}
	return 0;
}

/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help the user identify any mismapping by the VMM when
 * the driver is running in a VM.  This function should only be called before
 * interrupts are set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}

/**
 * Allocate and initialize the device structure for the hfi.
 * @pdev: the pci_dev for the hfi1_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;

	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		ppd->actual_vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable defaults, will be set
		 * for real when the link is up.
		 */
		ppd->lstate = IB_PORT_DOWN;
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
		ppd->last_pstate = 0xff; /* invalid value */
	}

	dd->link_default = HLS_DN_POLL;

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component.  In this case, it is likely that the
	 * driver is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}

	/* ensure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}

	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds.  If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;

	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip sizes, reset chip CSRs */
	init_chip(dd);

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* call before get_platform_config(), after init_chip_resources() */
	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip() - the chip will not initiate any PCIe transactions
	 * - pcie_speeds() - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *   downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point.  Also, all
	 * contexts use the same value, as per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
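	/*
	 * Explanatory note (added): sizeof(u64) / sizeof(u32) == 2, so
	 * rhf_offset is the receive header queue entry size in dwords less
	 * the two dwords assumed to hold the 8-byte RHF at the end of each
	 * entry.
	 */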

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		     ((dd->base_guid >> 11) & 0xF000000));
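	/*
	 * Explanatory note (added): shifting the base guid right by 11 moves
	 * guid bits [38:35] down to bits [27:24], and the 0xF000000 mask
	 * keeps only those four bits, matching the layout described above.
	 */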

	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* The user refcount starts with one to indicate an active device */
	atomic_set(&dd->user_refcount, 1);

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}

static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help it go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
			egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}

/**
 * create_pbc - build a pbc for transmission
 * @flags: special case flags or-ed in built pbc
 * @srate_mbs: static rate, in units of 10^6 bits/sec
 * @vl: vl
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
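
/*
 * Usage sketch (added, illustrative only): a send with no special flags and
 * no static rate pacing would build its PBC roughly as
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, hdr_dwords + data_dwords + 2);
 *
 * where the trailing "+ 2" accounts for the two PBC dwords included in the
 * dword length, per the kernel-doc above.
 */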

#define SBUS_THERMAL	0x4f
#define SBUS_THERM_MONITOR_MODE	0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dd),						\
		   "Thermal sensor initialization failed: %s (%d)\n",	\
		   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);
	/* Thermal Sensor Initialization */
	/* Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/* Step 2: Set Reset bit in Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/* Step 3: Write clock divider value (100MHz -> 2MHz) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/* Step 4: Select temperature mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/* Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
	/* Set initialized flag */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}

static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE. This will cause the
	 *	   8051 to put the Serdes in reset. However, we don't want to
	 *	   go through the entire link state machine since we want to
	 *	   shutdown ASAP. Furthermore, this is not a graceful shutdown
	 *	   but rather an attempt to save the chip.
	 *	   Code below is almost the same as quiet_serdes() but avoids
	 *	   all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);
	/*
	 * Step 2: Shutdown LCB and 8051
	 *	   After shutdown, do not restore DC_CFG_RESET value.
	 */
	dc_shutdown(dd);
}