i915_gem.c 164 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535553655375538553955405541554255435544554555465547554855495550555155525553555455555556555755585559556055615562556355645565556655675568556955705571557255735574557555765577557855795580558155825583558455855586558755885589559055915592559355945595559655975598559956005601560256035604560556065607560856095610561156125613561456155616561756185619562056215622562356245625562656275628562956305631563256335634563556365637563856395640564156425643564456455646564756485649565056515652565356545655565656575658565956605661566256635664566556665667566856695670567156725673567456755676567756785679568056815682568356845685568656875688568956905691569256935694569556965697569856995700570157025703570457055706570757085709571057115712571357145715571657175718571957205721572257235724572557265727572857295730573157325733573457355736573757385739574057415742574357445745574657475748574957505751575257535754575557565757575857595760576157625763576457655766576757685769577057715772577357745775577657775778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177
  1. /*
  2. * Copyright © 2008-2015 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. *
  26. */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_vma_manager.h>
  29. #include <drm/i915_drm.h>
  30. #include "i915_drv.h"
  31. #include "i915_gem_clflush.h"
  32. #include "i915_vgpu.h"
  33. #include "i915_trace.h"
  34. #include "intel_drv.h"
  35. #include "intel_frontbuffer.h"
  36. #include "intel_mocs.h"
  37. #include "intel_workarounds.h"
  38. #include "i915_gemfs.h"
  39. #include <linux/dma-fence-array.h>
  40. #include <linux/kthread.h>
  41. #include <linux/reservation.h>
  42. #include <linux/shmem_fs.h>
  43. #include <linux/slab.h>
  44. #include <linux/stop_machine.h>
  45. #include <linux/swap.h>
  46. #include <linux/pci.h>
  47. #include <linux/dma-buf.h>
  48. static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
  49. static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
  50. {
  51. if (obj->cache_dirty)
  52. return false;
  53. if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
  54. return true;
  55. return obj->pin_global; /* currently in use by HW, keep flushed */
  56. }
  57. static int
  58. insert_mappable_node(struct i915_ggtt *ggtt,
  59. struct drm_mm_node *node, u32 size)
  60. {
  61. memset(node, 0, sizeof(*node));
  62. return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
  63. size, 0, I915_COLOR_UNEVICTABLE,
  64. 0, ggtt->mappable_end,
  65. DRM_MM_INSERT_LOW);
  66. }
  67. static void
  68. remove_mappable_node(struct drm_mm_node *node)
  69. {
  70. drm_mm_remove_node(node);
  71. }
  72. /* some bookkeeping */
  73. static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
  74. u64 size)
  75. {
  76. spin_lock(&dev_priv->mm.object_stat_lock);
  77. dev_priv->mm.object_count++;
  78. dev_priv->mm.object_memory += size;
  79. spin_unlock(&dev_priv->mm.object_stat_lock);
  80. }
  81. static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
  82. u64 size)
  83. {
  84. spin_lock(&dev_priv->mm.object_stat_lock);
  85. dev_priv->mm.object_count--;
  86. dev_priv->mm.object_memory -= size;
  87. spin_unlock(&dev_priv->mm.object_stat_lock);
  88. }
  89. static int
  90. i915_gem_wait_for_error(struct i915_gpu_error *error)
  91. {
  92. int ret;
  93. might_sleep();
  94. /*
  95. * Only wait 10 seconds for the gpu reset to complete to avoid hanging
  96. * userspace. If it takes that long something really bad is going on and
  97. * we should simply try to bail out and fail as gracefully as possible.
  98. */
  99. ret = wait_event_interruptible_timeout(error->reset_queue,
  100. !i915_reset_backoff(error),
  101. I915_RESET_TIMEOUT);
  102. if (ret == 0) {
  103. DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
  104. return -EIO;
  105. } else if (ret < 0) {
  106. return ret;
  107. } else {
  108. return 0;
  109. }
  110. }
  111. int i915_mutex_lock_interruptible(struct drm_device *dev)
  112. {
  113. struct drm_i915_private *dev_priv = to_i915(dev);
  114. int ret;
  115. ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
  116. if (ret)
  117. return ret;
  118. ret = mutex_lock_interruptible(&dev->struct_mutex);
  119. if (ret)
  120. return ret;
  121. return 0;
  122. }
  123. static u32 __i915_gem_park(struct drm_i915_private *i915)
  124. {
  125. GEM_TRACE("\n");
  126. lockdep_assert_held(&i915->drm.struct_mutex);
  127. GEM_BUG_ON(i915->gt.active_requests);
  128. GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
  129. if (!i915->gt.awake)
  130. return I915_EPOCH_INVALID;
  131. GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
  132. /*
  133. * Be paranoid and flush a concurrent interrupt to make sure
  134. * we don't reactivate any irq tasklets after parking.
  135. *
  136. * FIXME: Note that even though we have waited for execlists to be idle,
  137. * there may still be an in-flight interrupt even though the CSB
  138. * is now empty. synchronize_irq() makes sure that a residual interrupt
  139. * is completed before we continue, but it doesn't prevent the HW from
  140. * raising a spurious interrupt later. To complete the shield we should
  141. * coordinate disabling the CS irq with flushing the interrupts.
  142. */
  143. synchronize_irq(i915->drm.irq);
  144. intel_engines_park(i915);
  145. i915_timelines_park(i915);
  146. i915_pmu_gt_parked(i915);
  147. i915_vma_parked(i915);
  148. i915->gt.awake = false;
  149. if (INTEL_GEN(i915) >= 6)
  150. gen6_rps_idle(i915);
  151. intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
  152. intel_runtime_pm_put(i915);
  153. return i915->gt.epoch;
  154. }
  155. void i915_gem_park(struct drm_i915_private *i915)
  156. {
  157. GEM_TRACE("\n");
  158. lockdep_assert_held(&i915->drm.struct_mutex);
  159. GEM_BUG_ON(i915->gt.active_requests);
  160. if (!i915->gt.awake)
  161. return;
  162. /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
  163. mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
  164. }
  165. void i915_gem_unpark(struct drm_i915_private *i915)
  166. {
  167. GEM_TRACE("\n");
  168. lockdep_assert_held(&i915->drm.struct_mutex);
  169. GEM_BUG_ON(!i915->gt.active_requests);
  170. if (i915->gt.awake)
  171. return;
  172. intel_runtime_pm_get_noresume(i915);
  173. /*
  174. * It seems that the DMC likes to transition between the DC states a lot
  175. * when there are no connected displays (no active power domains) during
  176. * command submission.
  177. *
  178. * This activity has negative impact on the performance of the chip with
  179. * huge latencies observed in the interrupt handler and elsewhere.
  180. *
  181. * Work around it by grabbing a GT IRQ power domain whilst there is any
  182. * GT activity, preventing any DC state transitions.
  183. */
  184. intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
  185. i915->gt.awake = true;
  186. if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
  187. i915->gt.epoch = 1;
  188. intel_enable_gt_powersave(i915);
  189. i915_update_gfx_val(i915);
  190. if (INTEL_GEN(i915) >= 6)
  191. gen6_rps_busy(i915);
  192. i915_pmu_gt_unparked(i915);
  193. intel_engines_unpark(i915);
  194. i915_queue_hangcheck(i915);
  195. queue_delayed_work(i915->wq,
  196. &i915->gt.retire_work,
  197. round_jiffies_up_relative(HZ));
  198. }
  199. int
  200. i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
  201. struct drm_file *file)
  202. {
  203. struct drm_i915_private *dev_priv = to_i915(dev);
  204. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  205. struct drm_i915_gem_get_aperture *args = data;
  206. struct i915_vma *vma;
  207. u64 pinned;
  208. pinned = ggtt->vm.reserved;
  209. mutex_lock(&dev->struct_mutex);
  210. list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
  211. if (i915_vma_is_pinned(vma))
  212. pinned += vma->node.size;
  213. list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
  214. if (i915_vma_is_pinned(vma))
  215. pinned += vma->node.size;
  216. mutex_unlock(&dev->struct_mutex);
  217. args->aper_size = ggtt->vm.total;
  218. args->aper_available_size = args->aper_size - pinned;
  219. return 0;
  220. }
  221. static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
  222. {
  223. struct address_space *mapping = obj->base.filp->f_mapping;
  224. drm_dma_handle_t *phys;
  225. struct sg_table *st;
  226. struct scatterlist *sg;
  227. char *vaddr;
  228. int i;
  229. int err;
  230. if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
  231. return -EINVAL;
  232. /* Always aligning to the object size, allows a single allocation
  233. * to handle all possible callers, and given typical object sizes,
  234. * the alignment of the buddy allocation will naturally match.
  235. */
  236. phys = drm_pci_alloc(obj->base.dev,
  237. roundup_pow_of_two(obj->base.size),
  238. roundup_pow_of_two(obj->base.size));
  239. if (!phys)
  240. return -ENOMEM;
  241. vaddr = phys->vaddr;
  242. for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
  243. struct page *page;
  244. char *src;
  245. page = shmem_read_mapping_page(mapping, i);
  246. if (IS_ERR(page)) {
  247. err = PTR_ERR(page);
  248. goto err_phys;
  249. }
  250. src = kmap_atomic(page);
  251. memcpy(vaddr, src, PAGE_SIZE);
  252. drm_clflush_virt_range(vaddr, PAGE_SIZE);
  253. kunmap_atomic(src);
  254. put_page(page);
  255. vaddr += PAGE_SIZE;
  256. }
  257. i915_gem_chipset_flush(to_i915(obj->base.dev));
  258. st = kmalloc(sizeof(*st), GFP_KERNEL);
  259. if (!st) {
  260. err = -ENOMEM;
  261. goto err_phys;
  262. }
  263. if (sg_alloc_table(st, 1, GFP_KERNEL)) {
  264. kfree(st);
  265. err = -ENOMEM;
  266. goto err_phys;
  267. }
  268. sg = st->sgl;
  269. sg->offset = 0;
  270. sg->length = obj->base.size;
  271. sg_dma_address(sg) = phys->busaddr;
  272. sg_dma_len(sg) = obj->base.size;
  273. obj->phys_handle = phys;
  274. __i915_gem_object_set_pages(obj, st, sg->length);
  275. return 0;
  276. err_phys:
  277. drm_pci_free(obj->base.dev, phys);
  278. return err;
  279. }
  280. static void __start_cpu_write(struct drm_i915_gem_object *obj)
  281. {
  282. obj->read_domains = I915_GEM_DOMAIN_CPU;
  283. obj->write_domain = I915_GEM_DOMAIN_CPU;
  284. if (cpu_write_needs_clflush(obj))
  285. obj->cache_dirty = true;
  286. }
  287. static void
  288. __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
  289. struct sg_table *pages,
  290. bool needs_clflush)
  291. {
  292. GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
  293. if (obj->mm.madv == I915_MADV_DONTNEED)
  294. obj->mm.dirty = false;
  295. if (needs_clflush &&
  296. (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
  297. !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
  298. drm_clflush_sg(pages);
  299. __start_cpu_write(obj);
  300. }
  301. static void
  302. i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
  303. struct sg_table *pages)
  304. {
  305. __i915_gem_object_release_shmem(obj, pages, false);
  306. if (obj->mm.dirty) {
  307. struct address_space *mapping = obj->base.filp->f_mapping;
  308. char *vaddr = obj->phys_handle->vaddr;
  309. int i;
  310. for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
  311. struct page *page;
  312. char *dst;
  313. page = shmem_read_mapping_page(mapping, i);
  314. if (IS_ERR(page))
  315. continue;
  316. dst = kmap_atomic(page);
  317. drm_clflush_virt_range(vaddr, PAGE_SIZE);
  318. memcpy(dst, vaddr, PAGE_SIZE);
  319. kunmap_atomic(dst);
  320. set_page_dirty(page);
  321. if (obj->mm.madv == I915_MADV_WILLNEED)
  322. mark_page_accessed(page);
  323. put_page(page);
  324. vaddr += PAGE_SIZE;
  325. }
  326. obj->mm.dirty = false;
  327. }
  328. sg_free_table(pages);
  329. kfree(pages);
  330. drm_pci_free(obj->base.dev, obj->phys_handle);
  331. }
  332. static void
  333. i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
  334. {
  335. i915_gem_object_unpin_pages(obj);
  336. }
  337. static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
  338. .get_pages = i915_gem_object_get_pages_phys,
  339. .put_pages = i915_gem_object_put_pages_phys,
  340. .release = i915_gem_object_release_phys,
  341. };
  342. static const struct drm_i915_gem_object_ops i915_gem_object_ops;
  343. int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
  344. {
  345. struct i915_vma *vma;
  346. LIST_HEAD(still_in_list);
  347. int ret;
  348. lockdep_assert_held(&obj->base.dev->struct_mutex);
  349. /* Closed vma are removed from the obj->vma_list - but they may
  350. * still have an active binding on the object. To remove those we
  351. * must wait for all rendering to complete to the object (as unbinding
  352. * must anyway), and retire the requests.
  353. */
  354. ret = i915_gem_object_set_to_cpu_domain(obj, false);
  355. if (ret)
  356. return ret;
  357. while ((vma = list_first_entry_or_null(&obj->vma_list,
  358. struct i915_vma,
  359. obj_link))) {
  360. list_move_tail(&vma->obj_link, &still_in_list);
  361. ret = i915_vma_unbind(vma);
  362. if (ret)
  363. break;
  364. }
  365. list_splice(&still_in_list, &obj->vma_list);
  366. return ret;
  367. }
  368. static long
  369. i915_gem_object_wait_fence(struct dma_fence *fence,
  370. unsigned int flags,
  371. long timeout,
  372. struct intel_rps_client *rps_client)
  373. {
  374. struct i915_request *rq;
  375. BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
  376. if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
  377. return timeout;
  378. if (!dma_fence_is_i915(fence))
  379. return dma_fence_wait_timeout(fence,
  380. flags & I915_WAIT_INTERRUPTIBLE,
  381. timeout);
  382. rq = to_request(fence);
  383. if (i915_request_completed(rq))
  384. goto out;
  385. /*
  386. * This client is about to stall waiting for the GPU. In many cases
  387. * this is undesirable and limits the throughput of the system, as
  388. * many clients cannot continue processing user input/output whilst
  389. * blocked. RPS autotuning may take tens of milliseconds to respond
  390. * to the GPU load and thus incurs additional latency for the client.
  391. * We can circumvent that by promoting the GPU frequency to maximum
  392. * before we wait. This makes the GPU throttle up much more quickly
  393. * (good for benchmarks and user experience, e.g. window animations),
  394. * but at a cost of spending more power processing the workload
  395. * (bad for battery). Not all clients even want their results
  396. * immediately and for them we should just let the GPU select its own
  397. * frequency to maximise efficiency. To prevent a single client from
  398. * forcing the clocks too high for the whole system, we only allow
  399. * each client to waitboost once in a busy period.
  400. */
  401. if (rps_client && !i915_request_started(rq)) {
  402. if (INTEL_GEN(rq->i915) >= 6)
  403. gen6_rps_boost(rq, rps_client);
  404. }
  405. timeout = i915_request_wait(rq, flags, timeout);
  406. out:
  407. if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
  408. i915_request_retire_upto(rq);
  409. return timeout;
  410. }
  411. static long
  412. i915_gem_object_wait_reservation(struct reservation_object *resv,
  413. unsigned int flags,
  414. long timeout,
  415. struct intel_rps_client *rps_client)
  416. {
  417. unsigned int seq = __read_seqcount_begin(&resv->seq);
  418. struct dma_fence *excl;
  419. bool prune_fences = false;
  420. if (flags & I915_WAIT_ALL) {
  421. struct dma_fence **shared;
  422. unsigned int count, i;
  423. int ret;
  424. ret = reservation_object_get_fences_rcu(resv,
  425. &excl, &count, &shared);
  426. if (ret)
  427. return ret;
  428. for (i = 0; i < count; i++) {
  429. timeout = i915_gem_object_wait_fence(shared[i],
  430. flags, timeout,
  431. rps_client);
  432. if (timeout < 0)
  433. break;
  434. dma_fence_put(shared[i]);
  435. }
  436. for (; i < count; i++)
  437. dma_fence_put(shared[i]);
  438. kfree(shared);
  439. /*
  440. * If both shared fences and an exclusive fence exist,
  441. * then by construction the shared fences must be later
  442. * than the exclusive fence. If we successfully wait for
  443. * all the shared fences, we know that the exclusive fence
  444. * must all be signaled. If all the shared fences are
  445. * signaled, we can prune the array and recover the
  446. * floating references on the fences/requests.
  447. */
  448. prune_fences = count && timeout >= 0;
  449. } else {
  450. excl = reservation_object_get_excl_rcu(resv);
  451. }
  452. if (excl && timeout >= 0)
  453. timeout = i915_gem_object_wait_fence(excl, flags, timeout,
  454. rps_client);
  455. dma_fence_put(excl);
  456. /*
  457. * Opportunistically prune the fences iff we know they have *all* been
  458. * signaled and that the reservation object has not been changed (i.e.
  459. * no new fences have been added).
  460. */
  461. if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
  462. if (reservation_object_trylock(resv)) {
  463. if (!__read_seqcount_retry(&resv->seq, seq))
  464. reservation_object_add_excl_fence(resv, NULL);
  465. reservation_object_unlock(resv);
  466. }
  467. }
  468. return timeout;
  469. }
  470. static void __fence_set_priority(struct dma_fence *fence,
  471. const struct i915_sched_attr *attr)
  472. {
  473. struct i915_request *rq;
  474. struct intel_engine_cs *engine;
  475. if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
  476. return;
  477. rq = to_request(fence);
  478. engine = rq->engine;
  479. local_bh_disable();
  480. rcu_read_lock(); /* RCU serialisation for set-wedged protection */
  481. if (engine->schedule)
  482. engine->schedule(rq, attr);
  483. rcu_read_unlock();
  484. local_bh_enable(); /* kick the tasklets if queues were reprioritised */
  485. }
  486. static void fence_set_priority(struct dma_fence *fence,
  487. const struct i915_sched_attr *attr)
  488. {
  489. /* Recurse once into a fence-array */
  490. if (dma_fence_is_array(fence)) {
  491. struct dma_fence_array *array = to_dma_fence_array(fence);
  492. int i;
  493. for (i = 0; i < array->num_fences; i++)
  494. __fence_set_priority(array->fences[i], attr);
  495. } else {
  496. __fence_set_priority(fence, attr);
  497. }
  498. }
  499. int
  500. i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
  501. unsigned int flags,
  502. const struct i915_sched_attr *attr)
  503. {
  504. struct dma_fence *excl;
  505. if (flags & I915_WAIT_ALL) {
  506. struct dma_fence **shared;
  507. unsigned int count, i;
  508. int ret;
  509. ret = reservation_object_get_fences_rcu(obj->resv,
  510. &excl, &count, &shared);
  511. if (ret)
  512. return ret;
  513. for (i = 0; i < count; i++) {
  514. fence_set_priority(shared[i], attr);
  515. dma_fence_put(shared[i]);
  516. }
  517. kfree(shared);
  518. } else {
  519. excl = reservation_object_get_excl_rcu(obj->resv);
  520. }
  521. if (excl) {
  522. fence_set_priority(excl, attr);
  523. dma_fence_put(excl);
  524. }
  525. return 0;
  526. }
  527. /**
  528. * Waits for rendering to the object to be completed
  529. * @obj: i915 gem object
  530. * @flags: how to wait (under a lock, for all rendering or just for writes etc)
  531. * @timeout: how long to wait
  532. * @rps_client: client (user process) to charge for any waitboosting
  533. */
  534. int
  535. i915_gem_object_wait(struct drm_i915_gem_object *obj,
  536. unsigned int flags,
  537. long timeout,
  538. struct intel_rps_client *rps_client)
  539. {
  540. might_sleep();
  541. #if IS_ENABLED(CONFIG_LOCKDEP)
  542. GEM_BUG_ON(debug_locks &&
  543. !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
  544. !!(flags & I915_WAIT_LOCKED));
  545. #endif
  546. GEM_BUG_ON(timeout < 0);
  547. timeout = i915_gem_object_wait_reservation(obj->resv,
  548. flags, timeout,
  549. rps_client);
  550. return timeout < 0 ? timeout : 0;
  551. }
  552. static struct intel_rps_client *to_rps_client(struct drm_file *file)
  553. {
  554. struct drm_i915_file_private *fpriv = file->driver_priv;
  555. return &fpriv->rps_client;
  556. }
  557. static int
  558. i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
  559. struct drm_i915_gem_pwrite *args,
  560. struct drm_file *file)
  561. {
  562. void *vaddr = obj->phys_handle->vaddr + args->offset;
  563. char __user *user_data = u64_to_user_ptr(args->data_ptr);
  564. /* We manually control the domain here and pretend that it
  565. * remains coherent i.e. in the GTT domain, like shmem_pwrite.
  566. */
  567. intel_fb_obj_invalidate(obj, ORIGIN_CPU);
  568. if (copy_from_user(vaddr, user_data, args->size))
  569. return -EFAULT;
  570. drm_clflush_virt_range(vaddr, args->size);
  571. i915_gem_chipset_flush(to_i915(obj->base.dev));
  572. intel_fb_obj_flush(obj, ORIGIN_CPU);
  573. return 0;
  574. }
  575. void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
  576. {
  577. return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
  578. }
  579. void i915_gem_object_free(struct drm_i915_gem_object *obj)
  580. {
  581. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  582. kmem_cache_free(dev_priv->objects, obj);
  583. }
  584. static int
  585. i915_gem_create(struct drm_file *file,
  586. struct drm_i915_private *dev_priv,
  587. uint64_t size,
  588. uint32_t *handle_p)
  589. {
  590. struct drm_i915_gem_object *obj;
  591. int ret;
  592. u32 handle;
  593. size = roundup(size, PAGE_SIZE);
  594. if (size == 0)
  595. return -EINVAL;
  596. /* Allocate the new object */
  597. obj = i915_gem_object_create(dev_priv, size);
  598. if (IS_ERR(obj))
  599. return PTR_ERR(obj);
  600. ret = drm_gem_handle_create(file, &obj->base, &handle);
  601. /* drop reference from allocate - handle holds it now */
  602. i915_gem_object_put(obj);
  603. if (ret)
  604. return ret;
  605. *handle_p = handle;
  606. return 0;
  607. }
  608. int
  609. i915_gem_dumb_create(struct drm_file *file,
  610. struct drm_device *dev,
  611. struct drm_mode_create_dumb *args)
  612. {
  613. /* have to work out size/pitch and return them */
  614. args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
  615. args->size = args->pitch * args->height;
  616. return i915_gem_create(file, to_i915(dev),
  617. args->size, &args->handle);
  618. }
  619. static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
  620. {
  621. return !(obj->cache_level == I915_CACHE_NONE ||
  622. obj->cache_level == I915_CACHE_WT);
  623. }
  624. /**
  625. * Creates a new mm object and returns a handle to it.
  626. * @dev: drm device pointer
  627. * @data: ioctl data blob
  628. * @file: drm file pointer
  629. */
  630. int
  631. i915_gem_create_ioctl(struct drm_device *dev, void *data,
  632. struct drm_file *file)
  633. {
  634. struct drm_i915_private *dev_priv = to_i915(dev);
  635. struct drm_i915_gem_create *args = data;
  636. i915_gem_flush_free_objects(dev_priv);
  637. return i915_gem_create(file, dev_priv,
  638. args->size, &args->handle);
  639. }
  640. static inline enum fb_op_origin
  641. fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
  642. {
  643. return (domain == I915_GEM_DOMAIN_GTT ?
  644. obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
  645. }
  646. void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
  647. {
  648. /*
  649. * No actual flushing is required for the GTT write domain for reads
  650. * from the GTT domain. Writes to it "immediately" go to main memory
  651. * as far as we know, so there's no chipset flush. It also doesn't
  652. * land in the GPU render cache.
  653. *
  654. * However, we do have to enforce the order so that all writes through
  655. * the GTT land before any writes to the device, such as updates to
  656. * the GATT itself.
  657. *
  658. * We also have to wait a bit for the writes to land from the GTT.
  659. * An uncached read (i.e. mmio) seems to be ideal for the round-trip
  660. * timing. This issue has only been observed when switching quickly
  661. * between GTT writes and CPU reads from inside the kernel on recent hw,
  662. * and it appears to only affect discrete GTT blocks (i.e. on LLC
  663. * system agents we cannot reproduce this behaviour, until Cannonlake
  664. * that was!).
  665. */
  666. i915_gem_chipset_flush(dev_priv);
  667. intel_runtime_pm_get(dev_priv);
  668. spin_lock_irq(&dev_priv->uncore.lock);
  669. POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
  670. spin_unlock_irq(&dev_priv->uncore.lock);
  671. intel_runtime_pm_put(dev_priv);
  672. }
  673. static void
  674. flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
  675. {
  676. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  677. struct i915_vma *vma;
  678. if (!(obj->write_domain & flush_domains))
  679. return;
  680. switch (obj->write_domain) {
  681. case I915_GEM_DOMAIN_GTT:
  682. i915_gem_flush_ggtt_writes(dev_priv);
  683. intel_fb_obj_flush(obj,
  684. fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
  685. for_each_ggtt_vma(vma, obj) {
  686. if (vma->iomap)
  687. continue;
  688. i915_vma_unset_ggtt_write(vma);
  689. }
  690. break;
  691. case I915_GEM_DOMAIN_WC:
  692. wmb();
  693. break;
  694. case I915_GEM_DOMAIN_CPU:
  695. i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
  696. break;
  697. case I915_GEM_DOMAIN_RENDER:
  698. if (gpu_write_needs_clflush(obj))
  699. obj->cache_dirty = true;
  700. break;
  701. }
  702. obj->write_domain = 0;
  703. }
  704. static inline int
  705. __copy_to_user_swizzled(char __user *cpu_vaddr,
  706. const char *gpu_vaddr, int gpu_offset,
  707. int length)
  708. {
  709. int ret, cpu_offset = 0;
  710. while (length > 0) {
  711. int cacheline_end = ALIGN(gpu_offset + 1, 64);
  712. int this_length = min(cacheline_end - gpu_offset, length);
  713. int swizzled_gpu_offset = gpu_offset ^ 64;
  714. ret = __copy_to_user(cpu_vaddr + cpu_offset,
  715. gpu_vaddr + swizzled_gpu_offset,
  716. this_length);
  717. if (ret)
  718. return ret + length;
  719. cpu_offset += this_length;
  720. gpu_offset += this_length;
  721. length -= this_length;
  722. }
  723. return 0;
  724. }
  725. static inline int
  726. __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  727. const char __user *cpu_vaddr,
  728. int length)
  729. {
  730. int ret, cpu_offset = 0;
  731. while (length > 0) {
  732. int cacheline_end = ALIGN(gpu_offset + 1, 64);
  733. int this_length = min(cacheline_end - gpu_offset, length);
  734. int swizzled_gpu_offset = gpu_offset ^ 64;
  735. ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
  736. cpu_vaddr + cpu_offset,
  737. this_length);
  738. if (ret)
  739. return ret + length;
  740. cpu_offset += this_length;
  741. gpu_offset += this_length;
  742. length -= this_length;
  743. }
  744. return 0;
  745. }
  746. /*
  747. * Pins the specified object's pages and synchronizes the object with
  748. * GPU accesses. Sets needs_clflush to non-zero if the caller should
  749. * flush the object from the CPU cache.
  750. */
  751. int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
  752. unsigned int *needs_clflush)
  753. {
  754. int ret;
  755. lockdep_assert_held(&obj->base.dev->struct_mutex);
  756. *needs_clflush = 0;
  757. if (!i915_gem_object_has_struct_page(obj))
  758. return -ENODEV;
  759. ret = i915_gem_object_wait(obj,
  760. I915_WAIT_INTERRUPTIBLE |
  761. I915_WAIT_LOCKED,
  762. MAX_SCHEDULE_TIMEOUT,
  763. NULL);
  764. if (ret)
  765. return ret;
  766. ret = i915_gem_object_pin_pages(obj);
  767. if (ret)
  768. return ret;
  769. if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
  770. !static_cpu_has(X86_FEATURE_CLFLUSH)) {
  771. ret = i915_gem_object_set_to_cpu_domain(obj, false);
  772. if (ret)
  773. goto err_unpin;
  774. else
  775. goto out;
  776. }
  777. flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  778. /* If we're not in the cpu read domain, set ourself into the gtt
  779. * read domain and manually flush cachelines (if required). This
  780. * optimizes for the case when the gpu will dirty the data
  781. * anyway again before the next pread happens.
  782. */
  783. if (!obj->cache_dirty &&
  784. !(obj->read_domains & I915_GEM_DOMAIN_CPU))
  785. *needs_clflush = CLFLUSH_BEFORE;
  786. out:
  787. /* return with the pages pinned */
  788. return 0;
  789. err_unpin:
  790. i915_gem_object_unpin_pages(obj);
  791. return ret;
  792. }
  793. int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
  794. unsigned int *needs_clflush)
  795. {
  796. int ret;
  797. lockdep_assert_held(&obj->base.dev->struct_mutex);
  798. *needs_clflush = 0;
  799. if (!i915_gem_object_has_struct_page(obj))
  800. return -ENODEV;
  801. ret = i915_gem_object_wait(obj,
  802. I915_WAIT_INTERRUPTIBLE |
  803. I915_WAIT_LOCKED |
  804. I915_WAIT_ALL,
  805. MAX_SCHEDULE_TIMEOUT,
  806. NULL);
  807. if (ret)
  808. return ret;
  809. ret = i915_gem_object_pin_pages(obj);
  810. if (ret)
  811. return ret;
  812. if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
  813. !static_cpu_has(X86_FEATURE_CLFLUSH)) {
  814. ret = i915_gem_object_set_to_cpu_domain(obj, true);
  815. if (ret)
  816. goto err_unpin;
  817. else
  818. goto out;
  819. }
  820. flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  821. /* If we're not in the cpu write domain, set ourself into the
  822. * gtt write domain and manually flush cachelines (as required).
  823. * This optimizes for the case when the gpu will use the data
  824. * right away and we therefore have to clflush anyway.
  825. */
  826. if (!obj->cache_dirty) {
  827. *needs_clflush |= CLFLUSH_AFTER;
  828. /*
  829. * Same trick applies to invalidate partially written
  830. * cachelines read before writing.
  831. */
  832. if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
  833. *needs_clflush |= CLFLUSH_BEFORE;
  834. }
  835. out:
  836. intel_fb_obj_invalidate(obj, ORIGIN_CPU);
  837. obj->mm.dirty = true;
  838. /* return with the pages pinned */
  839. return 0;
  840. err_unpin:
  841. i915_gem_object_unpin_pages(obj);
  842. return ret;
  843. }
  844. static void
  845. shmem_clflush_swizzled_range(char *addr, unsigned long length,
  846. bool swizzled)
  847. {
  848. if (unlikely(swizzled)) {
  849. unsigned long start = (unsigned long) addr;
  850. unsigned long end = (unsigned long) addr + length;
  851. /* For swizzling simply ensure that we always flush both
  852. * channels. Lame, but simple and it works. Swizzled
  853. * pwrite/pread is far from a hotpath - current userspace
  854. * doesn't use it at all. */
  855. start = round_down(start, 128);
  856. end = round_up(end, 128);
  857. drm_clflush_virt_range((void *)start, end - start);
  858. } else {
  859. drm_clflush_virt_range(addr, length);
  860. }
  861. }
  862. /* Only difference to the fast-path function is that this can handle bit17
  863. * and uses non-atomic copy and kmap functions. */
  864. static int
  865. shmem_pread_slow(struct page *page, int offset, int length,
  866. char __user *user_data,
  867. bool page_do_bit17_swizzling, bool needs_clflush)
  868. {
  869. char *vaddr;
  870. int ret;
  871. vaddr = kmap(page);
  872. if (needs_clflush)
  873. shmem_clflush_swizzled_range(vaddr + offset, length,
  874. page_do_bit17_swizzling);
  875. if (page_do_bit17_swizzling)
  876. ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
  877. else
  878. ret = __copy_to_user(user_data, vaddr + offset, length);
  879. kunmap(page);
  880. return ret ? - EFAULT : 0;
  881. }
  882. static int
  883. shmem_pread(struct page *page, int offset, int length, char __user *user_data,
  884. bool page_do_bit17_swizzling, bool needs_clflush)
  885. {
  886. int ret;
  887. ret = -ENODEV;
  888. if (!page_do_bit17_swizzling) {
  889. char *vaddr = kmap_atomic(page);
  890. if (needs_clflush)
  891. drm_clflush_virt_range(vaddr + offset, length);
  892. ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
  893. kunmap_atomic(vaddr);
  894. }
  895. if (ret == 0)
  896. return 0;
  897. return shmem_pread_slow(page, offset, length, user_data,
  898. page_do_bit17_swizzling, needs_clflush);
  899. }
  900. static int
  901. i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
  902. struct drm_i915_gem_pread *args)
  903. {
  904. char __user *user_data;
  905. u64 remain;
  906. unsigned int obj_do_bit17_swizzling;
  907. unsigned int needs_clflush;
  908. unsigned int idx, offset;
  909. int ret;
  910. obj_do_bit17_swizzling = 0;
  911. if (i915_gem_object_needs_bit17_swizzle(obj))
  912. obj_do_bit17_swizzling = BIT(17);
  913. ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
  914. if (ret)
  915. return ret;
  916. ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
  917. mutex_unlock(&obj->base.dev->struct_mutex);
  918. if (ret)
  919. return ret;
  920. remain = args->size;
  921. user_data = u64_to_user_ptr(args->data_ptr);
  922. offset = offset_in_page(args->offset);
  923. for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
  924. struct page *page = i915_gem_object_get_page(obj, idx);
  925. unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
  926. ret = shmem_pread(page, offset, length, user_data,
  927. page_to_phys(page) & obj_do_bit17_swizzling,
  928. needs_clflush);
  929. if (ret)
  930. break;
  931. remain -= length;
  932. user_data += length;
  933. offset = 0;
  934. }
  935. i915_gem_obj_finish_shmem_access(obj);
  936. return ret;
  937. }
  938. static inline bool
  939. gtt_user_read(struct io_mapping *mapping,
  940. loff_t base, int offset,
  941. char __user *user_data, int length)
  942. {
  943. void __iomem *vaddr;
  944. unsigned long unwritten;
  945. /* We can use the cpu mem copy function because this is X86. */
  946. vaddr = io_mapping_map_atomic_wc(mapping, base);
  947. unwritten = __copy_to_user_inatomic(user_data,
  948. (void __force *)vaddr + offset,
  949. length);
  950. io_mapping_unmap_atomic(vaddr);
  951. if (unwritten) {
  952. vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
  953. unwritten = copy_to_user(user_data,
  954. (void __force *)vaddr + offset,
  955. length);
  956. io_mapping_unmap(vaddr);
  957. }
  958. return unwritten;
  959. }
  960. static int
  961. i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
  962. const struct drm_i915_gem_pread *args)
  963. {
  964. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  965. struct i915_ggtt *ggtt = &i915->ggtt;
  966. struct drm_mm_node node;
  967. struct i915_vma *vma;
  968. void __user *user_data;
  969. u64 remain, offset;
  970. int ret;
  971. ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  972. if (ret)
  973. return ret;
  974. intel_runtime_pm_get(i915);
  975. vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
  976. PIN_MAPPABLE |
  977. PIN_NONFAULT |
  978. PIN_NONBLOCK);
  979. if (!IS_ERR(vma)) {
  980. node.start = i915_ggtt_offset(vma);
  981. node.allocated = false;
  982. ret = i915_vma_put_fence(vma);
  983. if (ret) {
  984. i915_vma_unpin(vma);
  985. vma = ERR_PTR(ret);
  986. }
  987. }
  988. if (IS_ERR(vma)) {
  989. ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
  990. if (ret)
  991. goto out_unlock;
  992. GEM_BUG_ON(!node.allocated);
  993. }
  994. ret = i915_gem_object_set_to_gtt_domain(obj, false);
  995. if (ret)
  996. goto out_unpin;
  997. mutex_unlock(&i915->drm.struct_mutex);
  998. user_data = u64_to_user_ptr(args->data_ptr);
  999. remain = args->size;
  1000. offset = args->offset;
  1001. while (remain > 0) {
  1002. /* Operation in this page
  1003. *
  1004. * page_base = page offset within aperture
  1005. * page_offset = offset within page
  1006. * page_length = bytes to copy for this page
  1007. */
  1008. u32 page_base = node.start;
  1009. unsigned page_offset = offset_in_page(offset);
  1010. unsigned page_length = PAGE_SIZE - page_offset;
  1011. page_length = remain < page_length ? remain : page_length;
  1012. if (node.allocated) {
  1013. wmb();
  1014. ggtt->vm.insert_page(&ggtt->vm,
  1015. i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
  1016. node.start, I915_CACHE_NONE, 0);
  1017. wmb();
  1018. } else {
  1019. page_base += offset & PAGE_MASK;
  1020. }
  1021. if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
  1022. user_data, page_length)) {
  1023. ret = -EFAULT;
  1024. break;
  1025. }
  1026. remain -= page_length;
  1027. user_data += page_length;
  1028. offset += page_length;
  1029. }
  1030. mutex_lock(&i915->drm.struct_mutex);
  1031. out_unpin:
  1032. if (node.allocated) {
  1033. wmb();
  1034. ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
  1035. remove_mappable_node(&node);
  1036. } else {
  1037. i915_vma_unpin(vma);
  1038. }
  1039. out_unlock:
  1040. intel_runtime_pm_put(i915);
  1041. mutex_unlock(&i915->drm.struct_mutex);
  1042. return ret;
  1043. }
  1044. /**
  1045. * Reads data from the object referenced by handle.
  1046. * @dev: drm device pointer
  1047. * @data: ioctl data blob
  1048. * @file: drm file pointer
  1049. *
  1050. * On error, the contents of *data are undefined.
  1051. */
  1052. int
  1053. i915_gem_pread_ioctl(struct drm_device *dev, void *data,
  1054. struct drm_file *file)
  1055. {
  1056. struct drm_i915_gem_pread *args = data;
  1057. struct drm_i915_gem_object *obj;
  1058. int ret;
  1059. if (args->size == 0)
  1060. return 0;
  1061. if (!access_ok(VERIFY_WRITE,
  1062. u64_to_user_ptr(args->data_ptr),
  1063. args->size))
  1064. return -EFAULT;
  1065. obj = i915_gem_object_lookup(file, args->handle);
  1066. if (!obj)
  1067. return -ENOENT;
  1068. /* Bounds check source. */
  1069. if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
  1070. ret = -EINVAL;
  1071. goto out;
  1072. }
  1073. trace_i915_gem_object_pread(obj, args->offset, args->size);
  1074. ret = i915_gem_object_wait(obj,
  1075. I915_WAIT_INTERRUPTIBLE,
  1076. MAX_SCHEDULE_TIMEOUT,
  1077. to_rps_client(file));
  1078. if (ret)
  1079. goto out;
  1080. ret = i915_gem_object_pin_pages(obj);
  1081. if (ret)
  1082. goto out;
  1083. ret = i915_gem_shmem_pread(obj, args);
  1084. if (ret == -EFAULT || ret == -ENODEV)
  1085. ret = i915_gem_gtt_pread(obj, args);
  1086. i915_gem_object_unpin_pages(obj);
  1087. out:
  1088. i915_gem_object_put(obj);
  1089. return ret;
  1090. }
  1091. /* This is the fast write path which cannot handle
  1092. * page faults in the source data
  1093. */
  1094. static inline bool
  1095. ggtt_write(struct io_mapping *mapping,
  1096. loff_t base, int offset,
  1097. char __user *user_data, int length)
  1098. {
  1099. void __iomem *vaddr;
  1100. unsigned long unwritten;
  1101. /* We can use the cpu mem copy function because this is X86. */
  1102. vaddr = io_mapping_map_atomic_wc(mapping, base);
  1103. unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
  1104. user_data, length);
  1105. io_mapping_unmap_atomic(vaddr);
  1106. if (unwritten) {
  1107. vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
  1108. unwritten = copy_from_user((void __force *)vaddr + offset,
  1109. user_data, length);
  1110. io_mapping_unmap(vaddr);
  1111. }
  1112. return unwritten;
  1113. }
  1114. /**
  1115. * This is the fast pwrite path, where we copy the data directly from the
  1116. * user into the GTT, uncached.
  1117. * @obj: i915 GEM object
  1118. * @args: pwrite arguments structure
  1119. */
  1120. static int
  1121. i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
  1122. const struct drm_i915_gem_pwrite *args)
  1123. {
  1124. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  1125. struct i915_ggtt *ggtt = &i915->ggtt;
  1126. struct drm_mm_node node;
  1127. struct i915_vma *vma;
  1128. u64 remain, offset;
  1129. void __user *user_data;
  1130. int ret;
  1131. ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  1132. if (ret)
  1133. return ret;
  1134. if (i915_gem_object_has_struct_page(obj)) {
  1135. /*
  1136. * Avoid waking the device up if we can fallback, as
  1137. * waking/resuming is very slow (worst-case 10-100 ms
  1138. * depending on PCI sleeps and our own resume time).
  1139. * This easily dwarfs any performance advantage from
  1140. * using the cache bypass of indirect GGTT access.
  1141. */
  1142. if (!intel_runtime_pm_get_if_in_use(i915)) {
  1143. ret = -EFAULT;
  1144. goto out_unlock;
  1145. }
  1146. } else {
  1147. /* No backing pages, no fallback, we must force GGTT access */
  1148. intel_runtime_pm_get(i915);
  1149. }
  1150. vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
  1151. PIN_MAPPABLE |
  1152. PIN_NONFAULT |
  1153. PIN_NONBLOCK);
  1154. if (!IS_ERR(vma)) {
  1155. node.start = i915_ggtt_offset(vma);
  1156. node.allocated = false;
  1157. ret = i915_vma_put_fence(vma);
  1158. if (ret) {
  1159. i915_vma_unpin(vma);
  1160. vma = ERR_PTR(ret);
  1161. }
  1162. }
  1163. if (IS_ERR(vma)) {
  1164. ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
  1165. if (ret)
  1166. goto out_rpm;
  1167. GEM_BUG_ON(!node.allocated);
  1168. }
  1169. ret = i915_gem_object_set_to_gtt_domain(obj, true);
  1170. if (ret)
  1171. goto out_unpin;
  1172. mutex_unlock(&i915->drm.struct_mutex);
  1173. intel_fb_obj_invalidate(obj, ORIGIN_CPU);
  1174. user_data = u64_to_user_ptr(args->data_ptr);
  1175. offset = args->offset;
  1176. remain = args->size;
  1177. while (remain) {
  1178. /* Operation in this page
  1179. *
  1180. * page_base = page offset within aperture
  1181. * page_offset = offset within page
  1182. * page_length = bytes to copy for this page
  1183. */
  1184. u32 page_base = node.start;
  1185. unsigned int page_offset = offset_in_page(offset);
  1186. unsigned int page_length = PAGE_SIZE - page_offset;
  1187. page_length = remain < page_length ? remain : page_length;
  1188. if (node.allocated) {
  1189. wmb(); /* flush the write before we modify the GGTT */
  1190. ggtt->vm.insert_page(&ggtt->vm,
  1191. i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
  1192. node.start, I915_CACHE_NONE, 0);
  1193. wmb(); /* flush modifications to the GGTT (insert_page) */
  1194. } else {
  1195. page_base += offset & PAGE_MASK;
  1196. }
  1197. /* If we get a fault while copying data, then (presumably) our
  1198. * source page isn't available. Return the error and we'll
  1199. * retry in the slow path.
  1200. * If the object is non-shmem backed, we retry again with the
  1201. * path that handles page fault.
  1202. */
  1203. if (ggtt_write(&ggtt->iomap, page_base, page_offset,
  1204. user_data, page_length)) {
  1205. ret = -EFAULT;
  1206. break;
  1207. }
  1208. remain -= page_length;
  1209. user_data += page_length;
  1210. offset += page_length;
  1211. }
  1212. intel_fb_obj_flush(obj, ORIGIN_CPU);
  1213. mutex_lock(&i915->drm.struct_mutex);
  1214. out_unpin:
  1215. if (node.allocated) {
  1216. wmb();
  1217. ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
  1218. remove_mappable_node(&node);
  1219. } else {
  1220. i915_vma_unpin(vma);
  1221. }
  1222. out_rpm:
  1223. intel_runtime_pm_put(i915);
  1224. out_unlock:
  1225. mutex_unlock(&i915->drm.struct_mutex);
  1226. return ret;
  1227. }
  1228. static int
  1229. shmem_pwrite_slow(struct page *page, int offset, int length,
  1230. char __user *user_data,
  1231. bool page_do_bit17_swizzling,
  1232. bool needs_clflush_before,
  1233. bool needs_clflush_after)
  1234. {
  1235. char *vaddr;
  1236. int ret;
  1237. vaddr = kmap(page);
  1238. if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
  1239. shmem_clflush_swizzled_range(vaddr + offset, length,
  1240. page_do_bit17_swizzling);
  1241. if (page_do_bit17_swizzling)
  1242. ret = __copy_from_user_swizzled(vaddr, offset, user_data,
  1243. length);
  1244. else
  1245. ret = __copy_from_user(vaddr + offset, user_data, length);
  1246. if (needs_clflush_after)
  1247. shmem_clflush_swizzled_range(vaddr + offset, length,
  1248. page_do_bit17_swizzling);
  1249. kunmap(page);
  1250. return ret ? -EFAULT : 0;
  1251. }
  1252. /* Per-page copy function for the shmem pwrite fastpath.
  1253. * Flushes invalid cachelines before writing to the target if
  1254. * needs_clflush_before is set and flushes out any written cachelines after
  1255. * writing if needs_clflush is set.
  1256. */
  1257. static int
  1258. shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
  1259. bool page_do_bit17_swizzling,
  1260. bool needs_clflush_before,
  1261. bool needs_clflush_after)
  1262. {
  1263. int ret;
  1264. ret = -ENODEV;
  1265. if (!page_do_bit17_swizzling) {
  1266. char *vaddr = kmap_atomic(page);
  1267. if (needs_clflush_before)
  1268. drm_clflush_virt_range(vaddr + offset, len);
  1269. ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
  1270. if (needs_clflush_after)
  1271. drm_clflush_virt_range(vaddr + offset, len);
  1272. kunmap_atomic(vaddr);
  1273. }
  1274. if (ret == 0)
  1275. return ret;
  1276. return shmem_pwrite_slow(page, offset, len, user_data,
  1277. page_do_bit17_swizzling,
  1278. needs_clflush_before,
  1279. needs_clflush_after);
  1280. }
  1281. static int
  1282. i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
  1283. const struct drm_i915_gem_pwrite *args)
  1284. {
  1285. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  1286. void __user *user_data;
  1287. u64 remain;
  1288. unsigned int obj_do_bit17_swizzling;
  1289. unsigned int partial_cacheline_write;
  1290. unsigned int needs_clflush;
  1291. unsigned int offset, idx;
  1292. int ret;
  1293. ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  1294. if (ret)
  1295. return ret;
  1296. ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
  1297. mutex_unlock(&i915->drm.struct_mutex);
  1298. if (ret)
  1299. return ret;
  1300. obj_do_bit17_swizzling = 0;
  1301. if (i915_gem_object_needs_bit17_swizzle(obj))
  1302. obj_do_bit17_swizzling = BIT(17);
  1303. /* If we don't overwrite a cacheline completely we need to be
  1304. * careful to have up-to-date data by first clflushing. Don't
  1305. * overcomplicate things and flush the entire patch.
  1306. */
  1307. partial_cacheline_write = 0;
  1308. if (needs_clflush & CLFLUSH_BEFORE)
  1309. partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
  1310. user_data = u64_to_user_ptr(args->data_ptr);
  1311. remain = args->size;
  1312. offset = offset_in_page(args->offset);
  1313. for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
  1314. struct page *page = i915_gem_object_get_page(obj, idx);
  1315. unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
  1316. ret = shmem_pwrite(page, offset, length, user_data,
  1317. page_to_phys(page) & obj_do_bit17_swizzling,
  1318. (offset | length) & partial_cacheline_write,
  1319. needs_clflush & CLFLUSH_AFTER);
  1320. if (ret)
  1321. break;
  1322. remain -= length;
  1323. user_data += length;
  1324. offset = 0;
  1325. }
  1326. intel_fb_obj_flush(obj, ORIGIN_CPU);
  1327. i915_gem_obj_finish_shmem_access(obj);
  1328. return ret;
  1329. }
  1330. /**
  1331. * Writes data to the object referenced by handle.
  1332. * @dev: drm device
  1333. * @data: ioctl data blob
  1334. * @file: drm file
  1335. *
  1336. * On error, the contents of the buffer that were to be modified are undefined.
  1337. */
  1338. int
  1339. i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  1340. struct drm_file *file)
  1341. {
  1342. struct drm_i915_gem_pwrite *args = data;
  1343. struct drm_i915_gem_object *obj;
  1344. int ret;
  1345. if (args->size == 0)
  1346. return 0;
  1347. if (!access_ok(VERIFY_READ,
  1348. u64_to_user_ptr(args->data_ptr),
  1349. args->size))
  1350. return -EFAULT;
  1351. obj = i915_gem_object_lookup(file, args->handle);
  1352. if (!obj)
  1353. return -ENOENT;
  1354. /* Bounds check destination. */
  1355. if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
  1356. ret = -EINVAL;
  1357. goto err;
  1358. }
  1359. /* Writes not allowed into this read-only object */
  1360. if (i915_gem_object_is_readonly(obj)) {
  1361. ret = -EINVAL;
  1362. goto err;
  1363. }
  1364. trace_i915_gem_object_pwrite(obj, args->offset, args->size);
  1365. ret = -ENODEV;
  1366. if (obj->ops->pwrite)
  1367. ret = obj->ops->pwrite(obj, args);
  1368. if (ret != -ENODEV)
  1369. goto err;
  1370. ret = i915_gem_object_wait(obj,
  1371. I915_WAIT_INTERRUPTIBLE |
  1372. I915_WAIT_ALL,
  1373. MAX_SCHEDULE_TIMEOUT,
  1374. to_rps_client(file));
  1375. if (ret)
  1376. goto err;
  1377. ret = i915_gem_object_pin_pages(obj);
  1378. if (ret)
  1379. goto err;
  1380. ret = -EFAULT;
  1381. /* We can only do the GTT pwrite on untiled buffers, as otherwise
  1382. * it would end up going through the fenced access, and we'll get
  1383. * different detiling behavior between reading and writing.
  1384. * pread/pwrite currently are reading and writing from the CPU
  1385. * perspective, requiring manual detiling by the client.
  1386. */
  1387. if (!i915_gem_object_has_struct_page(obj) ||
  1388. cpu_write_needs_clflush(obj))
  1389. /* Note that the gtt paths might fail with non-page-backed user
  1390. * pointers (e.g. gtt mappings when moving data between
  1391. * textures). Fallback to the shmem path in that case.
  1392. */
  1393. ret = i915_gem_gtt_pwrite_fast(obj, args);
  1394. if (ret == -EFAULT || ret == -ENOSPC) {
  1395. if (obj->phys_handle)
  1396. ret = i915_gem_phys_pwrite(obj, args, file);
  1397. else
  1398. ret = i915_gem_shmem_pwrite(obj, args);
  1399. }
  1400. i915_gem_object_unpin_pages(obj);
  1401. err:
  1402. i915_gem_object_put(obj);
  1403. return ret;
  1404. }
  1405. static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
  1406. {
  1407. struct drm_i915_private *i915;
  1408. struct list_head *list;
  1409. struct i915_vma *vma;
  1410. GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
  1411. for_each_ggtt_vma(vma, obj) {
  1412. if (i915_vma_is_active(vma))
  1413. continue;
  1414. if (!drm_mm_node_allocated(&vma->node))
  1415. continue;
  1416. list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
  1417. }
  1418. i915 = to_i915(obj->base.dev);
  1419. spin_lock(&i915->mm.obj_lock);
  1420. list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
  1421. list_move_tail(&obj->mm.link, list);
  1422. spin_unlock(&i915->mm.obj_lock);
  1423. }
  1424. /**
  1425. * Called when user space prepares to use an object with the CPU, either
  1426. * through the mmap ioctl's mapping or a GTT mapping.
  1427. * @dev: drm device
  1428. * @data: ioctl data blob
  1429. * @file: drm file
  1430. */
  1431. int
  1432. i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  1433. struct drm_file *file)
  1434. {
  1435. struct drm_i915_gem_set_domain *args = data;
  1436. struct drm_i915_gem_object *obj;
  1437. uint32_t read_domains = args->read_domains;
  1438. uint32_t write_domain = args->write_domain;
  1439. int err;
  1440. /* Only handle setting domains to types used by the CPU. */
  1441. if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
  1442. return -EINVAL;
  1443. /* Having something in the write domain implies it's in the read
  1444. * domain, and only that read domain. Enforce that in the request.
  1445. */
  1446. if (write_domain != 0 && read_domains != write_domain)
  1447. return -EINVAL;
  1448. obj = i915_gem_object_lookup(file, args->handle);
  1449. if (!obj)
  1450. return -ENOENT;
  1451. /* Try to flush the object off the GPU without holding the lock.
  1452. * We will repeat the flush holding the lock in the normal manner
  1453. * to catch cases where we are gazumped.
  1454. */
  1455. err = i915_gem_object_wait(obj,
  1456. I915_WAIT_INTERRUPTIBLE |
  1457. (write_domain ? I915_WAIT_ALL : 0),
  1458. MAX_SCHEDULE_TIMEOUT,
  1459. to_rps_client(file));
  1460. if (err)
  1461. goto out;
  1462. /*
  1463. * Proxy objects do not control access to the backing storage, ergo
  1464. * they cannot be used as a means to manipulate the cache domain
  1465. * tracking for that backing storage. The proxy object is always
  1466. * considered to be outside of any cache domain.
  1467. */
  1468. if (i915_gem_object_is_proxy(obj)) {
  1469. err = -ENXIO;
  1470. goto out;
  1471. }
  1472. /*
  1473. * Flush and acquire obj->pages so that we are coherent through
  1474. * direct access in memory with previous cached writes through
  1475. * shmemfs and that our cache domain tracking remains valid.
  1476. * For example, if the obj->filp was moved to swap without us
  1477. * being notified and releasing the pages, we would mistakenly
  1478. * continue to assume that the obj remained out of the CPU cached
  1479. * domain.
  1480. */
  1481. err = i915_gem_object_pin_pages(obj);
  1482. if (err)
  1483. goto out;
  1484. err = i915_mutex_lock_interruptible(dev);
  1485. if (err)
  1486. goto out_unpin;
  1487. if (read_domains & I915_GEM_DOMAIN_WC)
  1488. err = i915_gem_object_set_to_wc_domain(obj, write_domain);
  1489. else if (read_domains & I915_GEM_DOMAIN_GTT)
  1490. err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
  1491. else
  1492. err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
  1493. /* And bump the LRU for this access */
  1494. i915_gem_object_bump_inactive_ggtt(obj);
  1495. mutex_unlock(&dev->struct_mutex);
  1496. if (write_domain != 0)
  1497. intel_fb_obj_invalidate(obj,
  1498. fb_write_origin(obj, write_domain));
  1499. out_unpin:
  1500. i915_gem_object_unpin_pages(obj);
  1501. out:
  1502. i915_gem_object_put(obj);
  1503. return err;
  1504. }
  1505. /**
  1506. * Called when user space has done writes to this buffer
  1507. * @dev: drm device
  1508. * @data: ioctl data blob
  1509. * @file: drm file
  1510. */
  1511. int
  1512. i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  1513. struct drm_file *file)
  1514. {
  1515. struct drm_i915_gem_sw_finish *args = data;
  1516. struct drm_i915_gem_object *obj;
  1517. obj = i915_gem_object_lookup(file, args->handle);
  1518. if (!obj)
  1519. return -ENOENT;
  1520. /*
  1521. * Proxy objects are barred from CPU access, so there is no
  1522. * need to ban sw_finish as it is a nop.
  1523. */
  1524. /* Pinned buffers may be scanout, so flush the cache */
  1525. i915_gem_object_flush_if_display(obj);
  1526. i915_gem_object_put(obj);
  1527. return 0;
  1528. }
  1529. /**
  1530. * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  1531. * it is mapped to.
  1532. * @dev: drm device
  1533. * @data: ioctl data blob
  1534. * @file: drm file
  1535. *
  1536. * While the mapping holds a reference on the contents of the object, it doesn't
  1537. * imply a ref on the object itself.
  1538. *
  1539. * IMPORTANT:
  1540. *
  1541. * DRM driver writers who look a this function as an example for how to do GEM
  1542. * mmap support, please don't implement mmap support like here. The modern way
  1543. * to implement DRM mmap support is with an mmap offset ioctl (like
  1544. * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
  1545. * That way debug tooling like valgrind will understand what's going on, hiding
  1546. * the mmap call in a driver private ioctl will break that. The i915 driver only
  1547. * does cpu mmaps this way because we didn't know better.
  1548. */
  1549. int
  1550. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  1551. struct drm_file *file)
  1552. {
  1553. struct drm_i915_gem_mmap *args = data;
  1554. struct drm_i915_gem_object *obj;
  1555. unsigned long addr;
  1556. if (args->flags & ~(I915_MMAP_WC))
  1557. return -EINVAL;
  1558. if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
  1559. return -ENODEV;
  1560. obj = i915_gem_object_lookup(file, args->handle);
  1561. if (!obj)
  1562. return -ENOENT;
  1563. /* prime objects have no backing filp to GEM mmap
  1564. * pages from.
  1565. */
  1566. if (!obj->base.filp) {
  1567. i915_gem_object_put(obj);
  1568. return -ENXIO;
  1569. }
  1570. addr = vm_mmap(obj->base.filp, 0, args->size,
  1571. PROT_READ | PROT_WRITE, MAP_SHARED,
  1572. args->offset);
  1573. if (args->flags & I915_MMAP_WC) {
  1574. struct mm_struct *mm = current->mm;
  1575. struct vm_area_struct *vma;
  1576. if (down_write_killable(&mm->mmap_sem)) {
  1577. i915_gem_object_put(obj);
  1578. return -EINTR;
  1579. }
  1580. vma = find_vma(mm, addr);
  1581. if (vma)
  1582. vma->vm_page_prot =
  1583. pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
  1584. else
  1585. addr = -ENOMEM;
  1586. up_write(&mm->mmap_sem);
  1587. /* This may race, but that's ok, it only gets set */
  1588. WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
  1589. }
  1590. i915_gem_object_put(obj);
  1591. if (IS_ERR((void *)addr))
  1592. return addr;
  1593. args->addr_ptr = (uint64_t) addr;
  1594. return 0;
  1595. }
  1596. static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
  1597. {
  1598. return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
  1599. }
  1600. /**
  1601. * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
  1602. *
  1603. * A history of the GTT mmap interface:
  1604. *
  1605. * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
  1606. * aligned and suitable for fencing, and still fit into the available
  1607. * mappable space left by the pinned display objects. A classic problem
  1608. * we called the page-fault-of-doom where we would ping-pong between
  1609. * two objects that could not fit inside the GTT and so the memcpy
  1610. * would page one object in at the expense of the other between every
  1611. * single byte.
  1612. *
  1613. * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
  1614. * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
  1615. * object is too large for the available space (or simply too large
  1616. * for the mappable aperture!), a view is created instead and faulted
  1617. * into userspace. (This view is aligned and sized appropriately for
  1618. * fenced access.)
  1619. *
  1620. * 2 - Recognise WC as a separate cache domain so that we can flush the
  1621. * delayed writes via GTT before performing direct access via WC.
  1622. *
  1623. * Restrictions:
  1624. *
  1625. * * snoopable objects cannot be accessed via the GTT. It can cause machine
  1626. * hangs on some architectures, corruption on others. An attempt to service
  1627. * a GTT page fault from a snoopable object will generate a SIGBUS.
  1628. *
  1629. * * the object must be able to fit into RAM (physical memory, though no
  1630. * limited to the mappable aperture).
  1631. *
  1632. *
  1633. * Caveats:
  1634. *
  1635. * * a new GTT page fault will synchronize rendering from the GPU and flush
  1636. * all data to system memory. Subsequent access will not be synchronized.
  1637. *
  1638. * * all mappings are revoked on runtime device suspend.
  1639. *
  1640. * * there are only 8, 16 or 32 fence registers to share between all users
  1641. * (older machines require fence register for display and blitter access
  1642. * as well). Contention of the fence registers will cause the previous users
  1643. * to be unmapped and any new access will generate new page faults.
  1644. *
  1645. * * running out of memory while servicing a fault may generate a SIGBUS,
  1646. * rather than the expected SIGSEGV.
  1647. */
  1648. int i915_gem_mmap_gtt_version(void)
  1649. {
  1650. return 2;
  1651. }
  1652. static inline struct i915_ggtt_view
  1653. compute_partial_view(struct drm_i915_gem_object *obj,
  1654. pgoff_t page_offset,
  1655. unsigned int chunk)
  1656. {
  1657. struct i915_ggtt_view view;
  1658. if (i915_gem_object_is_tiled(obj))
  1659. chunk = roundup(chunk, tile_row_pages(obj));
  1660. view.type = I915_GGTT_VIEW_PARTIAL;
  1661. view.partial.offset = rounddown(page_offset, chunk);
  1662. view.partial.size =
  1663. min_t(unsigned int, chunk,
  1664. (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
  1665. /* If the partial covers the entire object, just create a normal VMA. */
  1666. if (chunk >= obj->base.size >> PAGE_SHIFT)
  1667. view.type = I915_GGTT_VIEW_NORMAL;
  1668. return view;
  1669. }
  1670. /**
  1671. * i915_gem_fault - fault a page into the GTT
  1672. * @vmf: fault info
  1673. *
  1674. * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
  1675. * from userspace. The fault handler takes care of binding the object to
  1676. * the GTT (if needed), allocating and programming a fence register (again,
  1677. * only if needed based on whether the old reg is still valid or the object
  1678. * is tiled) and inserting a new PTE into the faulting process.
  1679. *
  1680. * Note that the faulting process may involve evicting existing objects
  1681. * from the GTT and/or fence registers to make room. So performance may
  1682. * suffer if the GTT working set is large or there are few fence registers
  1683. * left.
  1684. *
  1685. * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  1686. * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  1687. */
  1688. vm_fault_t i915_gem_fault(struct vm_fault *vmf)
  1689. {
  1690. #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
  1691. struct vm_area_struct *area = vmf->vma;
  1692. struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
  1693. struct drm_device *dev = obj->base.dev;
  1694. struct drm_i915_private *dev_priv = to_i915(dev);
  1695. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  1696. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1697. struct i915_vma *vma;
  1698. pgoff_t page_offset;
  1699. int ret;
  1700. /* Sanity check that we allow writing into this object */
  1701. if (i915_gem_object_is_readonly(obj) && write)
  1702. return VM_FAULT_SIGBUS;
  1703. /* We don't use vmf->pgoff since that has the fake offset */
  1704. page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
  1705. trace_i915_gem_object_fault(obj, page_offset, true, write);
  1706. /* Try to flush the object off the GPU first without holding the lock.
  1707. * Upon acquiring the lock, we will perform our sanity checks and then
  1708. * repeat the flush holding the lock in the normal manner to catch cases
  1709. * where we are gazumped.
  1710. */
  1711. ret = i915_gem_object_wait(obj,
  1712. I915_WAIT_INTERRUPTIBLE,
  1713. MAX_SCHEDULE_TIMEOUT,
  1714. NULL);
  1715. if (ret)
  1716. goto err;
  1717. ret = i915_gem_object_pin_pages(obj);
  1718. if (ret)
  1719. goto err;
  1720. intel_runtime_pm_get(dev_priv);
  1721. ret = i915_mutex_lock_interruptible(dev);
  1722. if (ret)
  1723. goto err_rpm;
  1724. /* Access to snoopable pages through the GTT is incoherent. */
  1725. if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
  1726. ret = -EFAULT;
  1727. goto err_unlock;
  1728. }
  1729. /* Now pin it into the GTT as needed */
  1730. vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
  1731. PIN_MAPPABLE |
  1732. PIN_NONBLOCK |
  1733. PIN_NONFAULT);
  1734. if (IS_ERR(vma)) {
  1735. /* Use a partial view if it is bigger than available space */
  1736. struct i915_ggtt_view view =
  1737. compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
  1738. unsigned int flags;
  1739. flags = PIN_MAPPABLE;
  1740. if (view.type == I915_GGTT_VIEW_NORMAL)
  1741. flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
  1742. /*
  1743. * Userspace is now writing through an untracked VMA, abandon
  1744. * all hope that the hardware is able to track future writes.
  1745. */
  1746. obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
  1747. vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
  1748. if (IS_ERR(vma) && !view.type) {
  1749. flags = PIN_MAPPABLE;
  1750. view.type = I915_GGTT_VIEW_PARTIAL;
  1751. vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
  1752. }
  1753. }
  1754. if (IS_ERR(vma)) {
  1755. ret = PTR_ERR(vma);
  1756. goto err_unlock;
  1757. }
  1758. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1759. if (ret)
  1760. goto err_unpin;
  1761. ret = i915_vma_pin_fence(vma);
  1762. if (ret)
  1763. goto err_unpin;
  1764. /* Finally, remap it using the new GTT offset */
  1765. ret = remap_io_mapping(area,
  1766. area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
  1767. (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
  1768. min_t(u64, vma->size, area->vm_end - area->vm_start),
  1769. &ggtt->iomap);
  1770. if (ret)
  1771. goto err_fence;
  1772. /* Mark as being mmapped into userspace for later revocation */
  1773. assert_rpm_wakelock_held(dev_priv);
  1774. if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
  1775. list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
  1776. GEM_BUG_ON(!obj->userfault_count);
  1777. i915_vma_set_ggtt_write(vma);
  1778. err_fence:
  1779. i915_vma_unpin_fence(vma);
  1780. err_unpin:
  1781. __i915_vma_unpin(vma);
  1782. err_unlock:
  1783. mutex_unlock(&dev->struct_mutex);
  1784. err_rpm:
  1785. intel_runtime_pm_put(dev_priv);
  1786. i915_gem_object_unpin_pages(obj);
  1787. err:
  1788. switch (ret) {
  1789. case -EIO:
  1790. /*
  1791. * We eat errors when the gpu is terminally wedged to avoid
  1792. * userspace unduly crashing (gl has no provisions for mmaps to
  1793. * fail). But any other -EIO isn't ours (e.g. swap in failure)
  1794. * and so needs to be reported.
  1795. */
  1796. if (!i915_terminally_wedged(&dev_priv->gpu_error))
  1797. return VM_FAULT_SIGBUS;
  1798. /* else: fall through */
  1799. case -EAGAIN:
  1800. /*
  1801. * EAGAIN means the gpu is hung and we'll wait for the error
  1802. * handler to reset everything when re-faulting in
  1803. * i915_mutex_lock_interruptible.
  1804. */
  1805. case 0:
  1806. case -ERESTARTSYS:
  1807. case -EINTR:
  1808. case -EBUSY:
  1809. /*
  1810. * EBUSY is ok: this just means that another thread
  1811. * already did the job.
  1812. */
  1813. return VM_FAULT_NOPAGE;
  1814. case -ENOMEM:
  1815. return VM_FAULT_OOM;
  1816. case -ENOSPC:
  1817. case -EFAULT:
  1818. return VM_FAULT_SIGBUS;
  1819. default:
  1820. WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
  1821. return VM_FAULT_SIGBUS;
  1822. }
  1823. }
  1824. static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
  1825. {
  1826. struct i915_vma *vma;
  1827. GEM_BUG_ON(!obj->userfault_count);
  1828. obj->userfault_count = 0;
  1829. list_del(&obj->userfault_link);
  1830. drm_vma_node_unmap(&obj->base.vma_node,
  1831. obj->base.dev->anon_inode->i_mapping);
  1832. for_each_ggtt_vma(vma, obj)
  1833. i915_vma_unset_userfault(vma);
  1834. }
  1835. /**
  1836. * i915_gem_release_mmap - remove physical page mappings
  1837. * @obj: obj in question
  1838. *
  1839. * Preserve the reservation of the mmapping with the DRM core code, but
  1840. * relinquish ownership of the pages back to the system.
  1841. *
  1842. * It is vital that we remove the page mapping if we have mapped a tiled
  1843. * object through the GTT and then lose the fence register due to
  1844. * resource pressure. Similarly if the object has been moved out of the
  1845. * aperture, than pages mapped into userspace must be revoked. Removing the
  1846. * mapping will then trigger a page fault on the next user access, allowing
  1847. * fixup by i915_gem_fault().
  1848. */
  1849. void
  1850. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1851. {
  1852. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  1853. /* Serialisation between user GTT access and our code depends upon
  1854. * revoking the CPU's PTE whilst the mutex is held. The next user
  1855. * pagefault then has to wait until we release the mutex.
  1856. *
  1857. * Note that RPM complicates somewhat by adding an additional
  1858. * requirement that operations to the GGTT be made holding the RPM
  1859. * wakeref.
  1860. */
  1861. lockdep_assert_held(&i915->drm.struct_mutex);
  1862. intel_runtime_pm_get(i915);
  1863. if (!obj->userfault_count)
  1864. goto out;
  1865. __i915_gem_object_release_mmap(obj);
  1866. /* Ensure that the CPU's PTE are revoked and there are not outstanding
  1867. * memory transactions from userspace before we return. The TLB
  1868. * flushing implied above by changing the PTE above *should* be
  1869. * sufficient, an extra barrier here just provides us with a bit
  1870. * of paranoid documentation about our requirement to serialise
  1871. * memory writes before touching registers / GSM.
  1872. */
  1873. wmb();
  1874. out:
  1875. intel_runtime_pm_put(i915);
  1876. }
  1877. void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
  1878. {
  1879. struct drm_i915_gem_object *obj, *on;
  1880. int i;
  1881. /*
  1882. * Only called during RPM suspend. All users of the userfault_list
  1883. * must be holding an RPM wakeref to ensure that this can not
  1884. * run concurrently with themselves (and use the struct_mutex for
  1885. * protection between themselves).
  1886. */
  1887. list_for_each_entry_safe(obj, on,
  1888. &dev_priv->mm.userfault_list, userfault_link)
  1889. __i915_gem_object_release_mmap(obj);
  1890. /* The fence will be lost when the device powers down. If any were
  1891. * in use by hardware (i.e. they are pinned), we should not be powering
  1892. * down! All other fences will be reacquired by the user upon waking.
  1893. */
  1894. for (i = 0; i < dev_priv->num_fence_regs; i++) {
  1895. struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  1896. /* Ideally we want to assert that the fence register is not
  1897. * live at this point (i.e. that no piece of code will be
  1898. * trying to write through fence + GTT, as that both violates
  1899. * our tracking of activity and associated locking/barriers,
  1900. * but also is illegal given that the hw is powered down).
  1901. *
  1902. * Previously we used reg->pin_count as a "liveness" indicator.
  1903. * That is not sufficient, and we need a more fine-grained
  1904. * tool if we want to have a sanity check here.
  1905. */
  1906. if (!reg->vma)
  1907. continue;
  1908. GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
  1909. reg->dirty = true;
  1910. }
  1911. }
  1912. static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
  1913. {
  1914. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  1915. int err;
  1916. err = drm_gem_create_mmap_offset(&obj->base);
  1917. if (likely(!err))
  1918. return 0;
  1919. /* Attempt to reap some mmap space from dead objects */
  1920. do {
  1921. err = i915_gem_wait_for_idle(dev_priv,
  1922. I915_WAIT_INTERRUPTIBLE,
  1923. MAX_SCHEDULE_TIMEOUT);
  1924. if (err)
  1925. break;
  1926. i915_gem_drain_freed_objects(dev_priv);
  1927. err = drm_gem_create_mmap_offset(&obj->base);
  1928. if (!err)
  1929. break;
  1930. } while (flush_delayed_work(&dev_priv->gt.retire_work));
  1931. return err;
  1932. }
  1933. static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
  1934. {
  1935. drm_gem_free_mmap_offset(&obj->base);
  1936. }
  1937. int
  1938. i915_gem_mmap_gtt(struct drm_file *file,
  1939. struct drm_device *dev,
  1940. uint32_t handle,
  1941. uint64_t *offset)
  1942. {
  1943. struct drm_i915_gem_object *obj;
  1944. int ret;
  1945. obj = i915_gem_object_lookup(file, handle);
  1946. if (!obj)
  1947. return -ENOENT;
  1948. ret = i915_gem_object_create_mmap_offset(obj);
  1949. if (ret == 0)
  1950. *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
  1951. i915_gem_object_put(obj);
  1952. return ret;
  1953. }
  1954. /**
  1955. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1956. * @dev: DRM device
  1957. * @data: GTT mapping ioctl data
  1958. * @file: GEM object info
  1959. *
  1960. * Simply returns the fake offset to userspace so it can mmap it.
  1961. * The mmap call will end up in drm_gem_mmap(), which will set things
  1962. * up so we can get faults in the handler above.
  1963. *
  1964. * The fault handler will take care of binding the object into the GTT
  1965. * (since it may have been evicted to make room for something), allocating
  1966. * a fence register, and mapping the appropriate aperture address into
  1967. * userspace.
  1968. */
  1969. int
  1970. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1971. struct drm_file *file)
  1972. {
  1973. struct drm_i915_gem_mmap_gtt *args = data;
  1974. return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  1975. }
  1976. /* Immediately discard the backing storage */
  1977. static void
  1978. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1979. {
  1980. i915_gem_object_free_mmap_offset(obj);
  1981. if (obj->base.filp == NULL)
  1982. return;
  1983. /* Our goal here is to return as much of the memory as
  1984. * is possible back to the system as we are called from OOM.
  1985. * To do this we must instruct the shmfs to drop all of its
  1986. * backing pages, *now*.
  1987. */
  1988. shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
  1989. obj->mm.madv = __I915_MADV_PURGED;
  1990. obj->mm.pages = ERR_PTR(-EFAULT);
  1991. }
  1992. /* Try to discard unwanted pages */
  1993. void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
  1994. {
  1995. struct address_space *mapping;
  1996. lockdep_assert_held(&obj->mm.lock);
  1997. GEM_BUG_ON(i915_gem_object_has_pages(obj));
  1998. switch (obj->mm.madv) {
  1999. case I915_MADV_DONTNEED:
  2000. i915_gem_object_truncate(obj);
  2001. case __I915_MADV_PURGED:
  2002. return;
  2003. }
  2004. if (obj->base.filp == NULL)
  2005. return;
  2006. mapping = obj->base.filp->f_mapping,
  2007. invalidate_mapping_pages(mapping, 0, (loff_t)-1);
  2008. }
  2009. static void
  2010. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
  2011. struct sg_table *pages)
  2012. {
  2013. struct sgt_iter sgt_iter;
  2014. struct page *page;
  2015. __i915_gem_object_release_shmem(obj, pages, true);
  2016. i915_gem_gtt_finish_pages(obj, pages);
  2017. if (i915_gem_object_needs_bit17_swizzle(obj))
  2018. i915_gem_object_save_bit_17_swizzle(obj, pages);
  2019. for_each_sgt_page(page, sgt_iter, pages) {
  2020. if (obj->mm.dirty)
  2021. set_page_dirty(page);
  2022. if (obj->mm.madv == I915_MADV_WILLNEED)
  2023. mark_page_accessed(page);
  2024. put_page(page);
  2025. }
  2026. obj->mm.dirty = false;
  2027. sg_free_table(pages);
  2028. kfree(pages);
  2029. }
  2030. static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
  2031. {
  2032. struct radix_tree_iter iter;
  2033. void __rcu **slot;
  2034. rcu_read_lock();
  2035. radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
  2036. radix_tree_delete(&obj->mm.get_page.radix, iter.index);
  2037. rcu_read_unlock();
  2038. }
  2039. static struct sg_table *
  2040. __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
  2041. {
  2042. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  2043. struct sg_table *pages;
  2044. pages = fetch_and_zero(&obj->mm.pages);
  2045. if (!pages)
  2046. return NULL;
  2047. spin_lock(&i915->mm.obj_lock);
  2048. list_del(&obj->mm.link);
  2049. spin_unlock(&i915->mm.obj_lock);
  2050. if (obj->mm.mapping) {
  2051. void *ptr;
  2052. ptr = page_mask_bits(obj->mm.mapping);
  2053. if (is_vmalloc_addr(ptr))
  2054. vunmap(ptr);
  2055. else
  2056. kunmap(kmap_to_page(ptr));
  2057. obj->mm.mapping = NULL;
  2058. }
  2059. __i915_gem_object_reset_page_iter(obj);
  2060. obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
  2061. return pages;
  2062. }
  2063. void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
  2064. enum i915_mm_subclass subclass)
  2065. {
  2066. struct sg_table *pages;
  2067. if (i915_gem_object_has_pinned_pages(obj))
  2068. return;
  2069. GEM_BUG_ON(obj->bind_count);
  2070. if (!i915_gem_object_has_pages(obj))
  2071. return;
  2072. /* May be called by shrinker from within get_pages() (on another bo) */
  2073. mutex_lock_nested(&obj->mm.lock, subclass);
  2074. if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
  2075. goto unlock;
  2076. /*
  2077. * ->put_pages might need to allocate memory for the bit17 swizzle
  2078. * array, hence protect them from being reaped by removing them from gtt
  2079. * lists early.
  2080. */
  2081. pages = __i915_gem_object_unset_pages(obj);
  2082. if (!IS_ERR(pages))
  2083. obj->ops->put_pages(obj, pages);
  2084. unlock:
  2085. mutex_unlock(&obj->mm.lock);
  2086. }
  2087. static bool i915_sg_trim(struct sg_table *orig_st)
  2088. {
  2089. struct sg_table new_st;
  2090. struct scatterlist *sg, *new_sg;
  2091. unsigned int i;
  2092. if (orig_st->nents == orig_st->orig_nents)
  2093. return false;
  2094. if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
  2095. return false;
  2096. new_sg = new_st.sgl;
  2097. for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
  2098. sg_set_page(new_sg, sg_page(sg), sg->length, 0);
  2099. /* called before being DMA mapped, no need to copy sg->dma_* */
  2100. new_sg = sg_next(new_sg);
  2101. }
  2102. GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
  2103. sg_free_table(orig_st);
  2104. *orig_st = new_st;
  2105. return true;
  2106. }
  2107. static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
  2108. {
  2109. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  2110. const unsigned long page_count = obj->base.size / PAGE_SIZE;
  2111. unsigned long i;
  2112. struct address_space *mapping;
  2113. struct sg_table *st;
  2114. struct scatterlist *sg;
  2115. struct sgt_iter sgt_iter;
  2116. struct page *page;
  2117. unsigned long last_pfn = 0; /* suppress gcc warning */
  2118. unsigned int max_segment = i915_sg_segment_size();
  2119. unsigned int sg_page_sizes;
  2120. gfp_t noreclaim;
  2121. int ret;
  2122. /* Assert that the object is not currently in any GPU domain. As it
  2123. * wasn't in the GTT, there shouldn't be any way it could have been in
  2124. * a GPU cache
  2125. */
  2126. GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
  2127. GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  2128. st = kmalloc(sizeof(*st), GFP_KERNEL);
  2129. if (st == NULL)
  2130. return -ENOMEM;
  2131. rebuild_st:
  2132. if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
  2133. kfree(st);
  2134. return -ENOMEM;
  2135. }
  2136. /* Get the list of pages out of our struct file. They'll be pinned
  2137. * at this point until we release them.
  2138. *
  2139. * Fail silently without starting the shrinker
  2140. */
  2141. mapping = obj->base.filp->f_mapping;
  2142. noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
  2143. noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
  2144. sg = st->sgl;
  2145. st->nents = 0;
  2146. sg_page_sizes = 0;
  2147. for (i = 0; i < page_count; i++) {
  2148. const unsigned int shrink[] = {
  2149. I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
  2150. 0,
  2151. }, *s = shrink;
  2152. gfp_t gfp = noreclaim;
  2153. do {
  2154. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  2155. if (likely(!IS_ERR(page)))
  2156. break;
  2157. if (!*s) {
  2158. ret = PTR_ERR(page);
  2159. goto err_sg;
  2160. }
  2161. i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
  2162. cond_resched();
  2163. /* We've tried hard to allocate the memory by reaping
  2164. * our own buffer, now let the real VM do its job and
  2165. * go down in flames if truly OOM.
  2166. *
  2167. * However, since graphics tend to be disposable,
  2168. * defer the oom here by reporting the ENOMEM back
  2169. * to userspace.
  2170. */
  2171. if (!*s) {
  2172. /* reclaim and warn, but no oom */
  2173. gfp = mapping_gfp_mask(mapping);
  2174. /* Our bo are always dirty and so we require
  2175. * kswapd to reclaim our pages (direct reclaim
  2176. * does not effectively begin pageout of our
  2177. * buffers on its own). However, direct reclaim
  2178. * only waits for kswapd when under allocation
  2179. * congestion. So as a result __GFP_RECLAIM is
  2180. * unreliable and fails to actually reclaim our
  2181. * dirty pages -- unless you try over and over
  2182. * again with !__GFP_NORETRY. However, we still
  2183. * want to fail this allocation rather than
  2184. * trigger the out-of-memory killer and for
  2185. * this we want __GFP_RETRY_MAYFAIL.
  2186. */
  2187. gfp |= __GFP_RETRY_MAYFAIL;
  2188. }
  2189. } while (1);
  2190. if (!i ||
  2191. sg->length >= max_segment ||
  2192. page_to_pfn(page) != last_pfn + 1) {
  2193. if (i) {
  2194. sg_page_sizes |= sg->length;
  2195. sg = sg_next(sg);
  2196. }
  2197. st->nents++;
  2198. sg_set_page(sg, page, PAGE_SIZE, 0);
  2199. } else {
  2200. sg->length += PAGE_SIZE;
  2201. }
  2202. last_pfn = page_to_pfn(page);
  2203. /* Check that the i965g/gm workaround works. */
  2204. WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
  2205. }
  2206. if (sg) { /* loop terminated early; short sg table */
  2207. sg_page_sizes |= sg->length;
  2208. sg_mark_end(sg);
  2209. }
  2210. /* Trim unused sg entries to avoid wasting memory. */
  2211. i915_sg_trim(st);
  2212. ret = i915_gem_gtt_prepare_pages(obj, st);
  2213. if (ret) {
  2214. /* DMA remapping failed? One possible cause is that
  2215. * it could not reserve enough large entries, asking
  2216. * for PAGE_SIZE chunks instead may be helpful.
  2217. */
  2218. if (max_segment > PAGE_SIZE) {
  2219. for_each_sgt_page(page, sgt_iter, st)
  2220. put_page(page);
  2221. sg_free_table(st);
  2222. max_segment = PAGE_SIZE;
  2223. goto rebuild_st;
  2224. } else {
  2225. dev_warn(&dev_priv->drm.pdev->dev,
  2226. "Failed to DMA remap %lu pages\n",
  2227. page_count);
  2228. goto err_pages;
  2229. }
  2230. }
  2231. if (i915_gem_object_needs_bit17_swizzle(obj))
  2232. i915_gem_object_do_bit_17_swizzle(obj, st);
  2233. __i915_gem_object_set_pages(obj, st, sg_page_sizes);
  2234. return 0;
  2235. err_sg:
  2236. sg_mark_end(sg);
  2237. err_pages:
  2238. for_each_sgt_page(page, sgt_iter, st)
  2239. put_page(page);
  2240. sg_free_table(st);
  2241. kfree(st);
  2242. /* shmemfs first checks if there is enough memory to allocate the page
  2243. * and reports ENOSPC should there be insufficient, along with the usual
  2244. * ENOMEM for a genuine allocation failure.
  2245. *
  2246. * We use ENOSPC in our driver to mean that we have run out of aperture
  2247. * space and so want to translate the error from shmemfs back to our
  2248. * usual understanding of ENOMEM.
  2249. */
  2250. if (ret == -ENOSPC)
  2251. ret = -ENOMEM;
  2252. return ret;
  2253. }
  2254. void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
  2255. struct sg_table *pages,
  2256. unsigned int sg_page_sizes)
  2257. {
  2258. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  2259. unsigned long supported = INTEL_INFO(i915)->page_sizes;
  2260. int i;
  2261. lockdep_assert_held(&obj->mm.lock);
  2262. obj->mm.get_page.sg_pos = pages->sgl;
  2263. obj->mm.get_page.sg_idx = 0;
  2264. obj->mm.pages = pages;
  2265. if (i915_gem_object_is_tiled(obj) &&
  2266. i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
  2267. GEM_BUG_ON(obj->mm.quirked);
  2268. __i915_gem_object_pin_pages(obj);
  2269. obj->mm.quirked = true;
  2270. }
  2271. GEM_BUG_ON(!sg_page_sizes);
  2272. obj->mm.page_sizes.phys = sg_page_sizes;
  2273. /*
  2274. * Calculate the supported page-sizes which fit into the given
  2275. * sg_page_sizes. This will give us the page-sizes which we may be able
  2276. * to use opportunistically when later inserting into the GTT. For
  2277. * example if phys=2G, then in theory we should be able to use 1G, 2M,
  2278. * 64K or 4K pages, although in practice this will depend on a number of
  2279. * other factors.
  2280. */
  2281. obj->mm.page_sizes.sg = 0;
  2282. for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
  2283. if (obj->mm.page_sizes.phys & ~0u << i)
  2284. obj->mm.page_sizes.sg |= BIT(i);
  2285. }
  2286. GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
  2287. spin_lock(&i915->mm.obj_lock);
  2288. list_add(&obj->mm.link, &i915->mm.unbound_list);
  2289. spin_unlock(&i915->mm.obj_lock);
  2290. }
  2291. static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  2292. {
  2293. int err;
  2294. if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
  2295. DRM_DEBUG("Attempting to obtain a purgeable object\n");
  2296. return -EFAULT;
  2297. }
  2298. err = obj->ops->get_pages(obj);
  2299. GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
  2300. return err;
  2301. }
  2302. /* Ensure that the associated pages are gathered from the backing storage
  2303. * and pinned into our object. i915_gem_object_pin_pages() may be called
  2304. * multiple times before they are released by a single call to
  2305. * i915_gem_object_unpin_pages() - once the pages are no longer referenced
  2306. * either as a result of memory pressure (reaping pages under the shrinker)
  2307. * or as the object is itself released.
  2308. */
  2309. int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  2310. {
  2311. int err;
  2312. err = mutex_lock_interruptible(&obj->mm.lock);
  2313. if (err)
  2314. return err;
  2315. if (unlikely(!i915_gem_object_has_pages(obj))) {
  2316. GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
  2317. err = ____i915_gem_object_get_pages(obj);
  2318. if (err)
  2319. goto unlock;
  2320. smp_mb__before_atomic();
  2321. }
  2322. atomic_inc(&obj->mm.pages_pin_count);
  2323. unlock:
  2324. mutex_unlock(&obj->mm.lock);
  2325. return err;
  2326. }
  2327. /* The 'mapping' part of i915_gem_object_pin_map() below */
  2328. static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
  2329. enum i915_map_type type)
  2330. {
  2331. unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
  2332. struct sg_table *sgt = obj->mm.pages;
  2333. struct sgt_iter sgt_iter;
  2334. struct page *page;
  2335. struct page *stack_pages[32];
  2336. struct page **pages = stack_pages;
  2337. unsigned long i = 0;
  2338. pgprot_t pgprot;
  2339. void *addr;
  2340. /* A single page can always be kmapped */
  2341. if (n_pages == 1 && type == I915_MAP_WB)
  2342. return kmap(sg_page(sgt->sgl));
  2343. if (n_pages > ARRAY_SIZE(stack_pages)) {
  2344. /* Too big for stack -- allocate temporary array instead */
  2345. pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
  2346. if (!pages)
  2347. return NULL;
  2348. }
  2349. for_each_sgt_page(page, sgt_iter, sgt)
  2350. pages[i++] = page;
  2351. /* Check that we have the expected number of pages */
  2352. GEM_BUG_ON(i != n_pages);
  2353. switch (type) {
  2354. default:
  2355. MISSING_CASE(type);
  2356. /* fallthrough to use PAGE_KERNEL anyway */
  2357. case I915_MAP_WB:
  2358. pgprot = PAGE_KERNEL;
  2359. break;
  2360. case I915_MAP_WC:
  2361. pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
  2362. break;
  2363. }
  2364. addr = vmap(pages, n_pages, 0, pgprot);
  2365. if (pages != stack_pages)
  2366. kvfree(pages);
  2367. return addr;
  2368. }
  2369. /* get, pin, and map the pages of the object into kernel space */
  2370. void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
  2371. enum i915_map_type type)
  2372. {
  2373. enum i915_map_type has_type;
  2374. bool pinned;
  2375. void *ptr;
  2376. int ret;
  2377. if (unlikely(!i915_gem_object_has_struct_page(obj)))
  2378. return ERR_PTR(-ENXIO);
  2379. ret = mutex_lock_interruptible(&obj->mm.lock);
  2380. if (ret)
  2381. return ERR_PTR(ret);
  2382. pinned = !(type & I915_MAP_OVERRIDE);
  2383. type &= ~I915_MAP_OVERRIDE;
  2384. if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
  2385. if (unlikely(!i915_gem_object_has_pages(obj))) {
  2386. GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
  2387. ret = ____i915_gem_object_get_pages(obj);
  2388. if (ret)
  2389. goto err_unlock;
  2390. smp_mb__before_atomic();
  2391. }
  2392. atomic_inc(&obj->mm.pages_pin_count);
  2393. pinned = false;
  2394. }
  2395. GEM_BUG_ON(!i915_gem_object_has_pages(obj));
  2396. ptr = page_unpack_bits(obj->mm.mapping, &has_type);
  2397. if (ptr && has_type != type) {
  2398. if (pinned) {
  2399. ret = -EBUSY;
  2400. goto err_unpin;
  2401. }
  2402. if (is_vmalloc_addr(ptr))
  2403. vunmap(ptr);
  2404. else
  2405. kunmap(kmap_to_page(ptr));
  2406. ptr = obj->mm.mapping = NULL;
  2407. }
  2408. if (!ptr) {
  2409. ptr = i915_gem_object_map(obj, type);
  2410. if (!ptr) {
  2411. ret = -ENOMEM;
  2412. goto err_unpin;
  2413. }
  2414. obj->mm.mapping = page_pack_bits(ptr, type);
  2415. }
  2416. out_unlock:
  2417. mutex_unlock(&obj->mm.lock);
  2418. return ptr;
  2419. err_unpin:
  2420. atomic_dec(&obj->mm.pages_pin_count);
  2421. err_unlock:
  2422. ptr = ERR_PTR(ret);
  2423. goto out_unlock;
  2424. }
  2425. static int
  2426. i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
  2427. const struct drm_i915_gem_pwrite *arg)
  2428. {
  2429. struct address_space *mapping = obj->base.filp->f_mapping;
  2430. char __user *user_data = u64_to_user_ptr(arg->data_ptr);
  2431. u64 remain, offset;
  2432. unsigned int pg;
  2433. /* Before we instantiate/pin the backing store for our use, we
  2434. * can prepopulate the shmemfs filp efficiently using a write into
  2435. * the pagecache. We avoid the penalty of instantiating all the
  2436. * pages, important if the user is just writing to a few and never
  2437. * uses the object on the GPU, and using a direct write into shmemfs
  2438. * allows it to avoid the cost of retrieving a page (either swapin
  2439. * or clearing-before-use) before it is overwritten.
  2440. */
  2441. if (i915_gem_object_has_pages(obj))
  2442. return -ENODEV;
  2443. if (obj->mm.madv != I915_MADV_WILLNEED)
  2444. return -EFAULT;
  2445. /* Before the pages are instantiated the object is treated as being
  2446. * in the CPU domain. The pages will be clflushed as required before
  2447. * use, and we can freely write into the pages directly. If userspace
  2448. * races pwrite with any other operation; corruption will ensue -
  2449. * that is userspace's prerogative!
  2450. */
  2451. remain = arg->size;
  2452. offset = arg->offset;
  2453. pg = offset_in_page(offset);
  2454. do {
  2455. unsigned int len, unwritten;
  2456. struct page *page;
  2457. void *data, *vaddr;
  2458. int err;
  2459. len = PAGE_SIZE - pg;
  2460. if (len > remain)
  2461. len = remain;
  2462. err = pagecache_write_begin(obj->base.filp, mapping,
  2463. offset, len, 0,
  2464. &page, &data);
  2465. if (err < 0)
  2466. return err;
  2467. vaddr = kmap(page);
  2468. unwritten = copy_from_user(vaddr + pg, user_data, len);
  2469. kunmap(page);
  2470. err = pagecache_write_end(obj->base.filp, mapping,
  2471. offset, len, len - unwritten,
  2472. page, data);
  2473. if (err < 0)
  2474. return err;
  2475. if (unwritten)
  2476. return -EFAULT;
  2477. remain -= len;
  2478. user_data += len;
  2479. offset += len;
  2480. pg = 0;
  2481. } while (remain);
  2482. return 0;
  2483. }
  2484. static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
  2485. const struct i915_gem_context *ctx)
  2486. {
  2487. unsigned int score;
  2488. unsigned long prev_hang;
  2489. if (i915_gem_context_is_banned(ctx))
  2490. score = I915_CLIENT_SCORE_CONTEXT_BAN;
  2491. else
  2492. score = 0;
  2493. prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
  2494. if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
  2495. score += I915_CLIENT_SCORE_HANG_FAST;
  2496. if (score) {
  2497. atomic_add(score, &file_priv->ban_score);
  2498. DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
  2499. ctx->name, score,
  2500. atomic_read(&file_priv->ban_score));
  2501. }
  2502. }
  2503. static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
  2504. {
  2505. unsigned int score;
  2506. bool banned, bannable;
  2507. atomic_inc(&ctx->guilty_count);
  2508. bannable = i915_gem_context_is_bannable(ctx);
  2509. score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
  2510. banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
  2511. /* Cool contexts don't accumulate client ban score */
  2512. if (!bannable)
  2513. return;
  2514. if (banned) {
  2515. DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
  2516. ctx->name, atomic_read(&ctx->guilty_count),
  2517. score);
  2518. i915_gem_context_set_banned(ctx);
  2519. }
  2520. if (!IS_ERR_OR_NULL(ctx->file_priv))
  2521. i915_gem_client_mark_guilty(ctx->file_priv, ctx);
  2522. }
  2523. static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
  2524. {
  2525. atomic_inc(&ctx->active_count);
  2526. }
  2527. struct i915_request *
  2528. i915_gem_find_active_request(struct intel_engine_cs *engine)
  2529. {
  2530. struct i915_request *request, *active = NULL;
  2531. unsigned long flags;
  2532. /*
  2533. * We are called by the error capture, reset and to dump engine
  2534. * state at random points in time. In particular, note that neither is
  2535. * crucially ordered with an interrupt. After a hang, the GPU is dead
  2536. * and we assume that no more writes can happen (we waited long enough
  2537. * for all writes that were in transaction to be flushed) - adding an
  2538. * extra delay for a recent interrupt is pointless. Hence, we do
  2539. * not need an engine->irq_seqno_barrier() before the seqno reads.
  2540. * At all other times, we must assume the GPU is still running, but
  2541. * we only care about the snapshot of this moment.
  2542. */
  2543. spin_lock_irqsave(&engine->timeline.lock, flags);
  2544. list_for_each_entry(request, &engine->timeline.requests, link) {
  2545. if (__i915_request_completed(request, request->global_seqno))
  2546. continue;
  2547. active = request;
  2548. break;
  2549. }
  2550. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2551. return active;
  2552. }
  2553. /*
  2554. * Ensure irq handler finishes, and not run again.
  2555. * Also return the active request so that we only search for it once.
  2556. */
  2557. struct i915_request *
  2558. i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
  2559. {
  2560. struct i915_request *request;
  2561. /*
  2562. * During the reset sequence, we must prevent the engine from
  2563. * entering RC6. As the context state is undefined until we restart
  2564. * the engine, if it does enter RC6 during the reset, the state
  2565. * written to the powercontext is undefined and so we may lose
  2566. * GPU state upon resume, i.e. fail to restart after a reset.
  2567. */
  2568. intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
  2569. request = engine->reset.prepare(engine);
  2570. if (request && request->fence.error == -EIO)
  2571. request = ERR_PTR(-EIO); /* Previous reset failed! */
  2572. return request;
  2573. }
  2574. int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
  2575. {
  2576. struct intel_engine_cs *engine;
  2577. struct i915_request *request;
  2578. enum intel_engine_id id;
  2579. int err = 0;
  2580. for_each_engine(engine, dev_priv, id) {
  2581. request = i915_gem_reset_prepare_engine(engine);
  2582. if (IS_ERR(request)) {
  2583. err = PTR_ERR(request);
  2584. continue;
  2585. }
  2586. engine->hangcheck.active_request = request;
  2587. }
  2588. i915_gem_revoke_fences(dev_priv);
  2589. intel_uc_sanitize(dev_priv);
  2590. return err;
  2591. }
  2592. static void engine_skip_context(struct i915_request *request)
  2593. {
  2594. struct intel_engine_cs *engine = request->engine;
  2595. struct i915_gem_context *hung_ctx = request->gem_context;
  2596. struct i915_timeline *timeline = request->timeline;
  2597. unsigned long flags;
  2598. GEM_BUG_ON(timeline == &engine->timeline);
  2599. spin_lock_irqsave(&engine->timeline.lock, flags);
  2600. spin_lock(&timeline->lock);
  2601. list_for_each_entry_continue(request, &engine->timeline.requests, link)
  2602. if (request->gem_context == hung_ctx)
  2603. i915_request_skip(request, -EIO);
  2604. list_for_each_entry(request, &timeline->requests, link)
  2605. i915_request_skip(request, -EIO);
  2606. spin_unlock(&timeline->lock);
  2607. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2608. }
  2609. /* Returns the request if it was guilty of the hang */
  2610. static struct i915_request *
  2611. i915_gem_reset_request(struct intel_engine_cs *engine,
  2612. struct i915_request *request,
  2613. bool stalled)
  2614. {
  2615. /* The guilty request will get skipped on a hung engine.
  2616. *
  2617. * Users of client default contexts do not rely on logical
  2618. * state preserved between batches so it is safe to execute
  2619. * queued requests following the hang. Non default contexts
  2620. * rely on preserved state, so skipping a batch loses the
  2621. * evolution of the state and it needs to be considered corrupted.
  2622. * Executing more queued batches on top of corrupted state is
  2623. * risky. But we take the risk by trying to advance through
  2624. * the queued requests in order to make the client behaviour
  2625. * more predictable around resets, by not throwing away random
  2626. * amount of batches it has prepared for execution. Sophisticated
  2627. * clients can use gem_reset_stats_ioctl and dma fence status
  2628. * (exported via sync_file info ioctl on explicit fences) to observe
  2629. * when it loses the context state and should rebuild accordingly.
  2630. *
  2631. * The context ban, and ultimately the client ban, mechanism are safety
  2632. * valves if client submission ends up resulting in nothing more than
  2633. * subsequent hangs.
  2634. */
  2635. if (i915_request_completed(request)) {
  2636. GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
  2637. engine->name, request->global_seqno,
  2638. request->fence.context, request->fence.seqno,
  2639. intel_engine_get_seqno(engine));
  2640. stalled = false;
  2641. }
  2642. if (stalled) {
  2643. i915_gem_context_mark_guilty(request->gem_context);
  2644. i915_request_skip(request, -EIO);
  2645. /* If this context is now banned, skip all pending requests. */
  2646. if (i915_gem_context_is_banned(request->gem_context))
  2647. engine_skip_context(request);
  2648. } else {
  2649. /*
  2650. * Since this is not the hung engine, it may have advanced
  2651. * since the hang declaration. Double check by refinding
  2652. * the active request at the time of the reset.
  2653. */
  2654. request = i915_gem_find_active_request(engine);
  2655. if (request) {
  2656. unsigned long flags;
  2657. i915_gem_context_mark_innocent(request->gem_context);
  2658. dma_fence_set_error(&request->fence, -EAGAIN);
  2659. /* Rewind the engine to replay the incomplete rq */
  2660. spin_lock_irqsave(&engine->timeline.lock, flags);
  2661. request = list_prev_entry(request, link);
  2662. if (&request->link == &engine->timeline.requests)
  2663. request = NULL;
  2664. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2665. }
  2666. }
  2667. return request;
  2668. }
  2669. void i915_gem_reset_engine(struct intel_engine_cs *engine,
  2670. struct i915_request *request,
  2671. bool stalled)
  2672. {
  2673. /*
  2674. * Make sure this write is visible before we re-enable the interrupt
  2675. * handlers on another CPU, as tasklet_enable() resolves to just
  2676. * a compiler barrier which is insufficient for our purpose here.
  2677. */
  2678. smp_store_mb(engine->irq_posted, 0);
  2679. if (request)
  2680. request = i915_gem_reset_request(engine, request, stalled);
  2681. /* Setup the CS to resume from the breadcrumb of the hung request */
  2682. engine->reset.reset(engine, request);
  2683. }
  2684. void i915_gem_reset(struct drm_i915_private *dev_priv,
  2685. unsigned int stalled_mask)
  2686. {
  2687. struct intel_engine_cs *engine;
  2688. enum intel_engine_id id;
  2689. lockdep_assert_held(&dev_priv->drm.struct_mutex);
  2690. i915_retire_requests(dev_priv);
  2691. for_each_engine(engine, dev_priv, id) {
  2692. struct intel_context *ce;
  2693. i915_gem_reset_engine(engine,
  2694. engine->hangcheck.active_request,
  2695. stalled_mask & ENGINE_MASK(id));
  2696. ce = fetch_and_zero(&engine->last_retired_context);
  2697. if (ce)
  2698. intel_context_unpin(ce);
  2699. /*
  2700. * Ostensibily, we always want a context loaded for powersaving,
  2701. * so if the engine is idle after the reset, send a request
  2702. * to load our scratch kernel_context.
  2703. *
  2704. * More mysteriously, if we leave the engine idle after a reset,
  2705. * the next userspace batch may hang, with what appears to be
  2706. * an incoherent read by the CS (presumably stale TLB). An
  2707. * empty request appears sufficient to paper over the glitch.
  2708. */
  2709. if (intel_engine_is_idle(engine)) {
  2710. struct i915_request *rq;
  2711. rq = i915_request_alloc(engine,
  2712. dev_priv->kernel_context);
  2713. if (!IS_ERR(rq))
  2714. i915_request_add(rq);
  2715. }
  2716. }
  2717. i915_gem_restore_fences(dev_priv);
  2718. }
  2719. void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
  2720. {
  2721. engine->reset.finish(engine);
  2722. intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
  2723. }
  2724. void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
  2725. {
  2726. struct intel_engine_cs *engine;
  2727. enum intel_engine_id id;
  2728. lockdep_assert_held(&dev_priv->drm.struct_mutex);
  2729. for_each_engine(engine, dev_priv, id) {
  2730. engine->hangcheck.active_request = NULL;
  2731. i915_gem_reset_finish_engine(engine);
  2732. }
  2733. }
  2734. static void nop_submit_request(struct i915_request *request)
  2735. {
  2736. GEM_TRACE("%s fence %llx:%d -> -EIO\n",
  2737. request->engine->name,
  2738. request->fence.context, request->fence.seqno);
  2739. dma_fence_set_error(&request->fence, -EIO);
  2740. i915_request_submit(request);
  2741. }
  2742. static void nop_complete_submit_request(struct i915_request *request)
  2743. {
  2744. unsigned long flags;
  2745. GEM_TRACE("%s fence %llx:%d -> -EIO\n",
  2746. request->engine->name,
  2747. request->fence.context, request->fence.seqno);
  2748. dma_fence_set_error(&request->fence, -EIO);
  2749. spin_lock_irqsave(&request->engine->timeline.lock, flags);
  2750. __i915_request_submit(request);
  2751. intel_engine_init_global_seqno(request->engine, request->global_seqno);
  2752. spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
  2753. }
  2754. void i915_gem_set_wedged(struct drm_i915_private *i915)
  2755. {
  2756. struct intel_engine_cs *engine;
  2757. enum intel_engine_id id;
  2758. GEM_TRACE("start\n");
  2759. if (GEM_SHOW_DEBUG()) {
  2760. struct drm_printer p = drm_debug_printer(__func__);
  2761. for_each_engine(engine, i915, id)
  2762. intel_engine_dump(engine, &p, "%s\n", engine->name);
  2763. }
  2764. set_bit(I915_WEDGED, &i915->gpu_error.flags);
  2765. smp_mb__after_atomic();
  2766. /*
  2767. * First, stop submission to hw, but do not yet complete requests by
  2768. * rolling the global seqno forward (since this would complete requests
  2769. * for which we haven't set the fence error to EIO yet).
  2770. */
  2771. for_each_engine(engine, i915, id) {
  2772. i915_gem_reset_prepare_engine(engine);
  2773. engine->submit_request = nop_submit_request;
  2774. engine->schedule = NULL;
  2775. }
  2776. i915->caps.scheduler = 0;
  2777. /* Even if the GPU reset fails, it should still stop the engines */
  2778. intel_gpu_reset(i915, ALL_ENGINES);
  2779. /*
  2780. * Make sure no one is running the old callback before we proceed with
  2781. * cancelling requests and resetting the completion tracking. Otherwise
  2782. * we might submit a request to the hardware which never completes.
  2783. */
  2784. synchronize_rcu();
  2785. for_each_engine(engine, i915, id) {
  2786. /* Mark all executing requests as skipped */
  2787. engine->cancel_requests(engine);
  2788. /*
  2789. * Only once we've force-cancelled all in-flight requests can we
  2790. * start to complete all requests.
  2791. */
  2792. engine->submit_request = nop_complete_submit_request;
  2793. }
  2794. /*
  2795. * Make sure no request can slip through without getting completed by
  2796. * either this call here to intel_engine_init_global_seqno, or the one
  2797. * in nop_complete_submit_request.
  2798. */
  2799. synchronize_rcu();
  2800. for_each_engine(engine, i915, id) {
  2801. unsigned long flags;
  2802. /*
  2803. * Mark all pending requests as complete so that any concurrent
  2804. * (lockless) lookup doesn't try and wait upon the request as we
  2805. * reset it.
  2806. */
  2807. spin_lock_irqsave(&engine->timeline.lock, flags);
  2808. intel_engine_init_global_seqno(engine,
  2809. intel_engine_last_submit(engine));
  2810. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2811. i915_gem_reset_finish_engine(engine);
  2812. }
  2813. GEM_TRACE("end\n");
  2814. wake_up_all(&i915->gpu_error.reset_queue);
  2815. }
  2816. bool i915_gem_unset_wedged(struct drm_i915_private *i915)
  2817. {
  2818. struct i915_timeline *tl;
  2819. lockdep_assert_held(&i915->drm.struct_mutex);
  2820. if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
  2821. return true;
  2822. GEM_TRACE("start\n");
  2823. /*
  2824. * Before unwedging, make sure that all pending operations
  2825. * are flushed and errored out - we may have requests waiting upon
  2826. * third party fences. We marked all inflight requests as EIO, and
  2827. * every execbuf since returned EIO, for consistency we want all
  2828. * the currently pending requests to also be marked as EIO, which
  2829. * is done inside our nop_submit_request - and so we must wait.
  2830. *
  2831. * No more can be submitted until we reset the wedged bit.
  2832. */
  2833. list_for_each_entry(tl, &i915->gt.timelines, link) {
  2834. struct i915_request *rq;
  2835. rq = i915_gem_active_peek(&tl->last_request,
  2836. &i915->drm.struct_mutex);
  2837. if (!rq)
  2838. continue;
  2839. /*
  2840. * We can't use our normal waiter as we want to
  2841. * avoid recursively trying to handle the current
  2842. * reset. The basic dma_fence_default_wait() installs
  2843. * a callback for dma_fence_signal(), which is
  2844. * triggered by our nop handler (indirectly, the
  2845. * callback enables the signaler thread which is
  2846. * woken by the nop_submit_request() advancing the seqno
  2847. * and when the seqno passes the fence, the signaler
  2848. * then signals the fence waking us up).
  2849. */
  2850. if (dma_fence_default_wait(&rq->fence, true,
  2851. MAX_SCHEDULE_TIMEOUT) < 0)
  2852. return false;
  2853. }
  2854. i915_retire_requests(i915);
  2855. GEM_BUG_ON(i915->gt.active_requests);
  2856. /*
  2857. * Undo nop_submit_request. We prevent all new i915 requests from
  2858. * being queued (by disallowing execbuf whilst wedged) so having
  2859. * waited for all active requests above, we know the system is idle
  2860. * and do not have to worry about a thread being inside
  2861. * engine->submit_request() as we swap over. So unlike installing
  2862. * the nop_submit_request on reset, we can do this from normal
  2863. * context and do not require stop_machine().
  2864. */
  2865. intel_engines_reset_default_submission(i915);
  2866. i915_gem_contexts_lost(i915);
  2867. GEM_TRACE("end\n");
  2868. smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
  2869. clear_bit(I915_WEDGED, &i915->gpu_error.flags);
  2870. return true;
  2871. }
  2872. static void
  2873. i915_gem_retire_work_handler(struct work_struct *work)
  2874. {
  2875. struct drm_i915_private *dev_priv =
  2876. container_of(work, typeof(*dev_priv), gt.retire_work.work);
  2877. struct drm_device *dev = &dev_priv->drm;
  2878. /* Come back later if the device is busy... */
  2879. if (mutex_trylock(&dev->struct_mutex)) {
  2880. i915_retire_requests(dev_priv);
  2881. mutex_unlock(&dev->struct_mutex);
  2882. }
  2883. /*
  2884. * Keep the retire handler running until we are finally idle.
  2885. * We do not need to do this test under locking as in the worst-case
  2886. * we queue the retire worker once too often.
  2887. */
  2888. if (READ_ONCE(dev_priv->gt.awake))
  2889. queue_delayed_work(dev_priv->wq,
  2890. &dev_priv->gt.retire_work,
  2891. round_jiffies_up_relative(HZ));
  2892. }
  2893. static void shrink_caches(struct drm_i915_private *i915)
  2894. {
  2895. /*
  2896. * kmem_cache_shrink() discards empty slabs and reorders partially
  2897. * filled slabs to prioritise allocating from the mostly full slabs,
  2898. * with the aim of reducing fragmentation.
  2899. */
  2900. kmem_cache_shrink(i915->priorities);
  2901. kmem_cache_shrink(i915->dependencies);
  2902. kmem_cache_shrink(i915->requests);
  2903. kmem_cache_shrink(i915->luts);
  2904. kmem_cache_shrink(i915->vmas);
  2905. kmem_cache_shrink(i915->objects);
  2906. }
  2907. struct sleep_rcu_work {
  2908. union {
  2909. struct rcu_head rcu;
  2910. struct work_struct work;
  2911. };
  2912. struct drm_i915_private *i915;
  2913. unsigned int epoch;
  2914. };
  2915. static inline bool
  2916. same_epoch(struct drm_i915_private *i915, unsigned int epoch)
  2917. {
  2918. /*
  2919. * There is a small chance that the epoch wrapped since we started
  2920. * sleeping. If we assume that epoch is at least a u32, then it will
  2921. * take at least 2^32 * 100ms for it to wrap, or about 326 years.
  2922. */
  2923. return epoch == READ_ONCE(i915->gt.epoch);
  2924. }
  2925. static void __sleep_work(struct work_struct *work)
  2926. {
  2927. struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
  2928. struct drm_i915_private *i915 = s->i915;
  2929. unsigned int epoch = s->epoch;
  2930. kfree(s);
  2931. if (same_epoch(i915, epoch))
  2932. shrink_caches(i915);
  2933. }
  2934. static void __sleep_rcu(struct rcu_head *rcu)
  2935. {
  2936. struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
  2937. struct drm_i915_private *i915 = s->i915;
  2938. if (same_epoch(i915, s->epoch)) {
  2939. INIT_WORK(&s->work, __sleep_work);
  2940. queue_work(i915->wq, &s->work);
  2941. } else {
  2942. kfree(s);
  2943. }
  2944. }
  2945. static inline bool
  2946. new_requests_since_last_retire(const struct drm_i915_private *i915)
  2947. {
  2948. return (READ_ONCE(i915->gt.active_requests) ||
  2949. work_pending(&i915->gt.idle_work.work));
  2950. }
  2951. static void assert_kernel_context_is_current(struct drm_i915_private *i915)
  2952. {
  2953. struct intel_engine_cs *engine;
  2954. enum intel_engine_id id;
  2955. if (i915_terminally_wedged(&i915->gpu_error))
  2956. return;
  2957. GEM_BUG_ON(i915->gt.active_requests);
  2958. for_each_engine(engine, i915, id) {
  2959. GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
  2960. GEM_BUG_ON(engine->last_retired_context !=
  2961. to_intel_context(i915->kernel_context, engine));
  2962. }
  2963. }
  2964. static void
  2965. i915_gem_idle_work_handler(struct work_struct *work)
  2966. {
  2967. struct drm_i915_private *dev_priv =
  2968. container_of(work, typeof(*dev_priv), gt.idle_work.work);
  2969. unsigned int epoch = I915_EPOCH_INVALID;
  2970. bool rearm_hangcheck;
  2971. if (!READ_ONCE(dev_priv->gt.awake))
  2972. return;
  2973. if (READ_ONCE(dev_priv->gt.active_requests))
  2974. return;
  2975. /*
  2976. * Flush out the last user context, leaving only the pinned
  2977. * kernel context resident. When we are idling on the kernel_context,
  2978. * no more new requests (with a context switch) are emitted and we
  2979. * can finally rest. A consequence is that the idle work handler is
  2980. * always called at least twice before idling (and if the system is
  2981. * idle that implies a round trip through the retire worker).
  2982. */
  2983. mutex_lock(&dev_priv->drm.struct_mutex);
  2984. i915_gem_switch_to_kernel_context(dev_priv);
  2985. mutex_unlock(&dev_priv->drm.struct_mutex);
  2986. GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
  2987. READ_ONCE(dev_priv->gt.active_requests));
  2988. /*
  2989. * Wait for last execlists context complete, but bail out in case a
  2990. * new request is submitted. As we don't trust the hardware, we
  2991. * continue on if the wait times out. This is necessary to allow
  2992. * the machine to suspend even if the hardware dies, and we will
  2993. * try to recover in resume (after depriving the hardware of power,
  2994. * it may be in a better mmod).
  2995. */
  2996. __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
  2997. intel_engines_are_idle(dev_priv),
  2998. I915_IDLE_ENGINES_TIMEOUT * 1000,
  2999. 10, 500);
  3000. rearm_hangcheck =
  3001. cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
  3002. if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
  3003. /* Currently busy, come back later */
  3004. mod_delayed_work(dev_priv->wq,
  3005. &dev_priv->gt.idle_work,
  3006. msecs_to_jiffies(50));
  3007. goto out_rearm;
  3008. }
  3009. /*
  3010. * New request retired after this work handler started, extend active
  3011. * period until next instance of the work.
  3012. */
  3013. if (new_requests_since_last_retire(dev_priv))
  3014. goto out_unlock;
  3015. epoch = __i915_gem_park(dev_priv);
  3016. assert_kernel_context_is_current(dev_priv);
  3017. rearm_hangcheck = false;
  3018. out_unlock:
  3019. mutex_unlock(&dev_priv->drm.struct_mutex);
  3020. out_rearm:
  3021. if (rearm_hangcheck) {
  3022. GEM_BUG_ON(!dev_priv->gt.awake);
  3023. i915_queue_hangcheck(dev_priv);
  3024. }
  3025. /*
  3026. * When we are idle, it is an opportune time to reap our caches.
  3027. * However, we have many objects that utilise RCU and the ordered
  3028. * i915->wq that this work is executing on. To try and flush any
  3029. * pending frees now we are idle, we first wait for an RCU grace
  3030. * period, and then queue a task (that will run last on the wq) to
  3031. * shrink and re-optimize the caches.
  3032. */
  3033. if (same_epoch(dev_priv, epoch)) {
  3034. struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
  3035. if (s) {
  3036. s->i915 = dev_priv;
  3037. s->epoch = epoch;
  3038. call_rcu(&s->rcu, __sleep_rcu);
  3039. }
  3040. }
  3041. }
  3042. void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
  3043. {
  3044. struct drm_i915_private *i915 = to_i915(gem->dev);
  3045. struct drm_i915_gem_object *obj = to_intel_bo(gem);
  3046. struct drm_i915_file_private *fpriv = file->driver_priv;
  3047. struct i915_lut_handle *lut, *ln;
  3048. mutex_lock(&i915->drm.struct_mutex);
  3049. list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
  3050. struct i915_gem_context *ctx = lut->ctx;
  3051. struct i915_vma *vma;
  3052. GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
  3053. if (ctx->file_priv != fpriv)
  3054. continue;
  3055. vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
  3056. GEM_BUG_ON(vma->obj != obj);
  3057. /* We allow the process to have multiple handles to the same
  3058. * vma, in the same fd namespace, by virtue of flink/open.
  3059. */
  3060. GEM_BUG_ON(!vma->open_count);
  3061. if (!--vma->open_count && !i915_vma_is_ggtt(vma))
  3062. i915_vma_close(vma);
  3063. list_del(&lut->obj_link);
  3064. list_del(&lut->ctx_link);
  3065. kmem_cache_free(i915->luts, lut);
  3066. __i915_gem_object_release_unless_active(obj);
  3067. }
  3068. mutex_unlock(&i915->drm.struct_mutex);
  3069. }
  3070. static unsigned long to_wait_timeout(s64 timeout_ns)
  3071. {
  3072. if (timeout_ns < 0)
  3073. return MAX_SCHEDULE_TIMEOUT;
  3074. if (timeout_ns == 0)
  3075. return 0;
  3076. return nsecs_to_jiffies_timeout(timeout_ns);
  3077. }
  3078. /**
  3079. * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  3080. * @dev: drm device pointer
  3081. * @data: ioctl data blob
  3082. * @file: drm file pointer
  3083. *
  3084. * Returns 0 if successful, else an error is returned with the remaining time in
  3085. * the timeout parameter.
  3086. * -ETIME: object is still busy after timeout
  3087. * -ERESTARTSYS: signal interrupted the wait
  3088. * -ENONENT: object doesn't exist
  3089. * Also possible, but rare:
  3090. * -EAGAIN: incomplete, restart syscall
  3091. * -ENOMEM: damn
  3092. * -ENODEV: Internal IRQ fail
  3093. * -E?: The add request failed
  3094. *
  3095. * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
  3096. * non-zero timeout parameter the wait ioctl will wait for the given number of
  3097. * nanoseconds on an object becoming unbusy. Since the wait itself does so
  3098. * without holding struct_mutex the object may become re-busied before this
  3099. * function completes. A similar but shorter * race condition exists in the busy
  3100. * ioctl
  3101. */
  3102. int
  3103. i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  3104. {
  3105. struct drm_i915_gem_wait *args = data;
  3106. struct drm_i915_gem_object *obj;
  3107. ktime_t start;
  3108. long ret;
  3109. if (args->flags != 0)
  3110. return -EINVAL;
  3111. obj = i915_gem_object_lookup(file, args->bo_handle);
  3112. if (!obj)
  3113. return -ENOENT;
  3114. start = ktime_get();
  3115. ret = i915_gem_object_wait(obj,
  3116. I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
  3117. to_wait_timeout(args->timeout_ns),
  3118. to_rps_client(file));
  3119. if (args->timeout_ns > 0) {
  3120. args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
  3121. if (args->timeout_ns < 0)
  3122. args->timeout_ns = 0;
  3123. /*
  3124. * Apparently ktime isn't accurate enough and occasionally has a
  3125. * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
  3126. * things up to make the test happy. We allow up to 1 jiffy.
  3127. *
  3128. * This is a regression from the timespec->ktime conversion.
  3129. */
  3130. if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
  3131. args->timeout_ns = 0;
  3132. /* Asked to wait beyond the jiffie/scheduler precision? */
  3133. if (ret == -ETIME && args->timeout_ns)
  3134. ret = -EAGAIN;
  3135. }
  3136. i915_gem_object_put(obj);
  3137. return ret;
  3138. }
  3139. static long wait_for_timeline(struct i915_timeline *tl,
  3140. unsigned int flags, long timeout)
  3141. {
  3142. struct i915_request *rq;
  3143. rq = i915_gem_active_get_unlocked(&tl->last_request);
  3144. if (!rq)
  3145. return timeout;
  3146. /*
  3147. * "Race-to-idle".
  3148. *
  3149. * Switching to the kernel context is often used a synchronous
  3150. * step prior to idling, e.g. in suspend for flushing all
  3151. * current operations to memory before sleeping. These we
  3152. * want to complete as quickly as possible to avoid prolonged
  3153. * stalls, so allow the gpu to boost to maximum clocks.
  3154. */
  3155. if (flags & I915_WAIT_FOR_IDLE_BOOST)
  3156. gen6_rps_boost(rq, NULL);
  3157. timeout = i915_request_wait(rq, flags, timeout);
  3158. i915_request_put(rq);
  3159. return timeout;
  3160. }
  3161. static int wait_for_engines(struct drm_i915_private *i915)
  3162. {
  3163. if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
  3164. dev_err(i915->drm.dev,
  3165. "Failed to idle engines, declaring wedged!\n");
  3166. GEM_TRACE_DUMP();
  3167. i915_gem_set_wedged(i915);
  3168. return -EIO;
  3169. }
  3170. return 0;
  3171. }
  3172. int i915_gem_wait_for_idle(struct drm_i915_private *i915,
  3173. unsigned int flags, long timeout)
  3174. {
  3175. GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
  3176. flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
  3177. timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
  3178. /* If the device is asleep, we have no requests outstanding */
  3179. if (!READ_ONCE(i915->gt.awake))
  3180. return 0;
  3181. if (flags & I915_WAIT_LOCKED) {
  3182. struct i915_timeline *tl;
  3183. int err;
  3184. lockdep_assert_held(&i915->drm.struct_mutex);
  3185. list_for_each_entry(tl, &i915->gt.timelines, link) {
  3186. timeout = wait_for_timeline(tl, flags, timeout);
  3187. if (timeout < 0)
  3188. return timeout;
  3189. }
  3190. err = wait_for_engines(i915);
  3191. if (err)
  3192. return err;
  3193. i915_retire_requests(i915);
  3194. GEM_BUG_ON(i915->gt.active_requests);
  3195. } else {
  3196. struct intel_engine_cs *engine;
  3197. enum intel_engine_id id;
  3198. for_each_engine(engine, i915, id) {
  3199. struct i915_timeline *tl = &engine->timeline;
  3200. timeout = wait_for_timeline(tl, flags, timeout);
  3201. if (timeout < 0)
  3202. return timeout;
  3203. }
  3204. }
  3205. return 0;
  3206. }
  3207. static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
  3208. {
  3209. /*
  3210. * We manually flush the CPU domain so that we can override and
  3211. * force the flush for the display, and perform it asyncrhonously.
  3212. */
  3213. flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  3214. if (obj->cache_dirty)
  3215. i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
  3216. obj->write_domain = 0;
  3217. }
  3218. void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
  3219. {
  3220. if (!READ_ONCE(obj->pin_global))
  3221. return;
  3222. mutex_lock(&obj->base.dev->struct_mutex);
  3223. __i915_gem_object_flush_for_display(obj);
  3224. mutex_unlock(&obj->base.dev->struct_mutex);
  3225. }
  3226. /**
  3227. * Moves a single object to the WC read, and possibly write domain.
  3228. * @obj: object to act on
  3229. * @write: ask for write access or read only
  3230. *
  3231. * This function returns when the move is complete, including waiting on
  3232. * flushes to occur.
  3233. */
  3234. int
  3235. i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
  3236. {
  3237. int ret;
  3238. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3239. ret = i915_gem_object_wait(obj,
  3240. I915_WAIT_INTERRUPTIBLE |
  3241. I915_WAIT_LOCKED |
  3242. (write ? I915_WAIT_ALL : 0),
  3243. MAX_SCHEDULE_TIMEOUT,
  3244. NULL);
  3245. if (ret)
  3246. return ret;
  3247. if (obj->write_domain == I915_GEM_DOMAIN_WC)
  3248. return 0;
  3249. /* Flush and acquire obj->pages so that we are coherent through
  3250. * direct access in memory with previous cached writes through
  3251. * shmemfs and that our cache domain tracking remains valid.
  3252. * For example, if the obj->filp was moved to swap without us
  3253. * being notified and releasing the pages, we would mistakenly
  3254. * continue to assume that the obj remained out of the CPU cached
  3255. * domain.
  3256. */
  3257. ret = i915_gem_object_pin_pages(obj);
  3258. if (ret)
  3259. return ret;
  3260. flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
  3261. /* Serialise direct access to this object with the barriers for
  3262. * coherent writes from the GPU, by effectively invalidating the
  3263. * WC domain upon first access.
  3264. */
  3265. if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
  3266. mb();
  3267. /* It should now be out of any other write domains, and we can update
  3268. * the domain values for our changes.
  3269. */
  3270. GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
  3271. obj->read_domains |= I915_GEM_DOMAIN_WC;
  3272. if (write) {
  3273. obj->read_domains = I915_GEM_DOMAIN_WC;
  3274. obj->write_domain = I915_GEM_DOMAIN_WC;
  3275. obj->mm.dirty = true;
  3276. }
  3277. i915_gem_object_unpin_pages(obj);
  3278. return 0;
  3279. }
  3280. /**
  3281. * Moves a single object to the GTT read, and possibly write domain.
  3282. * @obj: object to act on
  3283. * @write: ask for write access or read only
  3284. *
  3285. * This function returns when the move is complete, including waiting on
  3286. * flushes to occur.
  3287. */
  3288. int
  3289. i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  3290. {
  3291. int ret;
  3292. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3293. ret = i915_gem_object_wait(obj,
  3294. I915_WAIT_INTERRUPTIBLE |
  3295. I915_WAIT_LOCKED |
  3296. (write ? I915_WAIT_ALL : 0),
  3297. MAX_SCHEDULE_TIMEOUT,
  3298. NULL);
  3299. if (ret)
  3300. return ret;
  3301. if (obj->write_domain == I915_GEM_DOMAIN_GTT)
  3302. return 0;
  3303. /* Flush and acquire obj->pages so that we are coherent through
  3304. * direct access in memory with previous cached writes through
  3305. * shmemfs and that our cache domain tracking remains valid.
  3306. * For example, if the obj->filp was moved to swap without us
  3307. * being notified and releasing the pages, we would mistakenly
  3308. * continue to assume that the obj remained out of the CPU cached
  3309. * domain.
  3310. */
  3311. ret = i915_gem_object_pin_pages(obj);
  3312. if (ret)
  3313. return ret;
  3314. flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
  3315. /* Serialise direct access to this object with the barriers for
  3316. * coherent writes from the GPU, by effectively invalidating the
  3317. * GTT domain upon first access.
  3318. */
  3319. if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
  3320. mb();
  3321. /* It should now be out of any other write domains, and we can update
  3322. * the domain values for our changes.
  3323. */
  3324. GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  3325. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  3326. if (write) {
  3327. obj->read_domains = I915_GEM_DOMAIN_GTT;
  3328. obj->write_domain = I915_GEM_DOMAIN_GTT;
  3329. obj->mm.dirty = true;
  3330. }
  3331. i915_gem_object_unpin_pages(obj);
  3332. return 0;
  3333. }
  3334. /**
  3335. * Changes the cache-level of an object across all VMA.
  3336. * @obj: object to act on
  3337. * @cache_level: new cache level to set for the object
  3338. *
  3339. * After this function returns, the object will be in the new cache-level
  3340. * across all GTT and the contents of the backing storage will be coherent,
  3341. * with respect to the new cache-level. In order to keep the backing storage
  3342. * coherent for all users, we only allow a single cache level to be set
  3343. * globally on the object and prevent it from being changed whilst the
  3344. * hardware is reading from the object. That is if the object is currently
  3345. * on the scanout it will be set to uncached (or equivalent display
  3346. * cache coherency) and all non-MOCS GPU access will also be uncached so
  3347. * that all direct access to the scanout remains coherent.
  3348. */
  3349. int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
  3350. enum i915_cache_level cache_level)
  3351. {
  3352. struct i915_vma *vma;
  3353. int ret;
  3354. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3355. if (obj->cache_level == cache_level)
  3356. return 0;
  3357. /* Inspect the list of currently bound VMA and unbind any that would
  3358. * be invalid given the new cache-level. This is principally to
  3359. * catch the issue of the CS prefetch crossing page boundaries and
  3360. * reading an invalid PTE on older architectures.
  3361. */
  3362. restart:
  3363. list_for_each_entry(vma, &obj->vma_list, obj_link) {
  3364. if (!drm_mm_node_allocated(&vma->node))
  3365. continue;
  3366. if (i915_vma_is_pinned(vma)) {
  3367. DRM_DEBUG("can not change the cache level of pinned objects\n");
  3368. return -EBUSY;
  3369. }
  3370. if (!i915_vma_is_closed(vma) &&
  3371. i915_gem_valid_gtt_space(vma, cache_level))
  3372. continue;
  3373. ret = i915_vma_unbind(vma);
  3374. if (ret)
  3375. return ret;
  3376. /* As unbinding may affect other elements in the
  3377. * obj->vma_list (due to side-effects from retiring
  3378. * an active vma), play safe and restart the iterator.
  3379. */
  3380. goto restart;
  3381. }
  3382. /* We can reuse the existing drm_mm nodes but need to change the
  3383. * cache-level on the PTE. We could simply unbind them all and
  3384. * rebind with the correct cache-level on next use. However since
  3385. * we already have a valid slot, dma mapping, pages etc, we may as
  3386. * rewrite the PTE in the belief that doing so tramples upon less
  3387. * state and so involves less work.
  3388. */
  3389. if (obj->bind_count) {
  3390. /* Before we change the PTE, the GPU must not be accessing it.
  3391. * If we wait upon the object, we know that all the bound
  3392. * VMA are no longer active.
  3393. */
  3394. ret = i915_gem_object_wait(obj,
  3395. I915_WAIT_INTERRUPTIBLE |
  3396. I915_WAIT_LOCKED |
  3397. I915_WAIT_ALL,
  3398. MAX_SCHEDULE_TIMEOUT,
  3399. NULL);
  3400. if (ret)
  3401. return ret;
  3402. if (!HAS_LLC(to_i915(obj->base.dev)) &&
  3403. cache_level != I915_CACHE_NONE) {
  3404. /* Access to snoopable pages through the GTT is
  3405. * incoherent and on some machines causes a hard
  3406. * lockup. Relinquish the CPU mmaping to force
  3407. * userspace to refault in the pages and we can
  3408. * then double check if the GTT mapping is still
  3409. * valid for that pointer access.
  3410. */
  3411. i915_gem_release_mmap(obj);
  3412. /* As we no longer need a fence for GTT access,
  3413. * we can relinquish it now (and so prevent having
  3414. * to steal a fence from someone else on the next
  3415. * fence request). Note GPU activity would have
  3416. * dropped the fence as all snoopable access is
  3417. * supposed to be linear.
  3418. */
  3419. for_each_ggtt_vma(vma, obj) {
  3420. ret = i915_vma_put_fence(vma);
  3421. if (ret)
  3422. return ret;
  3423. }
  3424. } else {
  3425. /* We either have incoherent backing store and
  3426. * so no GTT access or the architecture is fully
  3427. * coherent. In such cases, existing GTT mmaps
  3428. * ignore the cache bit in the PTE and we can
  3429. * rewrite it without confusing the GPU or having
  3430. * to force userspace to fault back in its mmaps.
  3431. */
  3432. }
  3433. list_for_each_entry(vma, &obj->vma_list, obj_link) {
  3434. if (!drm_mm_node_allocated(&vma->node))
  3435. continue;
  3436. ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
  3437. if (ret)
  3438. return ret;
  3439. }
  3440. }
  3441. list_for_each_entry(vma, &obj->vma_list, obj_link)
  3442. vma->node.color = cache_level;
  3443. i915_gem_object_set_cache_coherency(obj, cache_level);
  3444. obj->cache_dirty = true; /* Always invalidate stale cachelines */
  3445. return 0;
  3446. }
  3447. int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
  3448. struct drm_file *file)
  3449. {
  3450. struct drm_i915_gem_caching *args = data;
  3451. struct drm_i915_gem_object *obj;
  3452. int err = 0;
  3453. rcu_read_lock();
  3454. obj = i915_gem_object_lookup_rcu(file, args->handle);
  3455. if (!obj) {
  3456. err = -ENOENT;
  3457. goto out;
  3458. }
  3459. switch (obj->cache_level) {
  3460. case I915_CACHE_LLC:
  3461. case I915_CACHE_L3_LLC:
  3462. args->caching = I915_CACHING_CACHED;
  3463. break;
  3464. case I915_CACHE_WT:
  3465. args->caching = I915_CACHING_DISPLAY;
  3466. break;
  3467. default:
  3468. args->caching = I915_CACHING_NONE;
  3469. break;
  3470. }
  3471. out:
  3472. rcu_read_unlock();
  3473. return err;
  3474. }
  3475. int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
  3476. struct drm_file *file)
  3477. {
  3478. struct drm_i915_private *i915 = to_i915(dev);
  3479. struct drm_i915_gem_caching *args = data;
  3480. struct drm_i915_gem_object *obj;
  3481. enum i915_cache_level level;
  3482. int ret = 0;
  3483. switch (args->caching) {
  3484. case I915_CACHING_NONE:
  3485. level = I915_CACHE_NONE;
  3486. break;
  3487. case I915_CACHING_CACHED:
  3488. /*
  3489. * Due to a HW issue on BXT A stepping, GPU stores via a
  3490. * snooped mapping may leave stale data in a corresponding CPU
  3491. * cacheline, whereas normally such cachelines would get
  3492. * invalidated.
  3493. */
  3494. if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
  3495. return -ENODEV;
  3496. level = I915_CACHE_LLC;
  3497. break;
  3498. case I915_CACHING_DISPLAY:
  3499. level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
  3500. break;
  3501. default:
  3502. return -EINVAL;
  3503. }
  3504. obj = i915_gem_object_lookup(file, args->handle);
  3505. if (!obj)
  3506. return -ENOENT;
  3507. /*
  3508. * The caching mode of proxy object is handled by its generator, and
  3509. * not allowed to be changed by userspace.
  3510. */
  3511. if (i915_gem_object_is_proxy(obj)) {
  3512. ret = -ENXIO;
  3513. goto out;
  3514. }
  3515. if (obj->cache_level == level)
  3516. goto out;
  3517. ret = i915_gem_object_wait(obj,
  3518. I915_WAIT_INTERRUPTIBLE,
  3519. MAX_SCHEDULE_TIMEOUT,
  3520. to_rps_client(file));
  3521. if (ret)
  3522. goto out;
  3523. ret = i915_mutex_lock_interruptible(dev);
  3524. if (ret)
  3525. goto out;
  3526. ret = i915_gem_object_set_cache_level(obj, level);
  3527. mutex_unlock(&dev->struct_mutex);
  3528. out:
  3529. i915_gem_object_put(obj);
  3530. return ret;
  3531. }
  3532. /*
  3533. * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
  3534. * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
  3535. * (for pageflips). We only flush the caches while preparing the buffer for
  3536. * display, the callers are responsible for frontbuffer flush.
  3537. */
  3538. struct i915_vma *
  3539. i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
  3540. u32 alignment,
  3541. const struct i915_ggtt_view *view,
  3542. unsigned int flags)
  3543. {
  3544. struct i915_vma *vma;
  3545. int ret;
  3546. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3547. /* Mark the global pin early so that we account for the
  3548. * display coherency whilst setting up the cache domains.
  3549. */
  3550. obj->pin_global++;
  3551. /* The display engine is not coherent with the LLC cache on gen6. As
  3552. * a result, we make sure that the pinning that is about to occur is
  3553. * done with uncached PTEs. This is lowest common denominator for all
  3554. * chipsets.
  3555. *
  3556. * However for gen6+, we could do better by using the GFDT bit instead
  3557. * of uncaching, which would allow us to flush all the LLC-cached data
  3558. * with that bit in the PTE to main memory with just one PIPE_CONTROL.
  3559. */
  3560. ret = i915_gem_object_set_cache_level(obj,
  3561. HAS_WT(to_i915(obj->base.dev)) ?
  3562. I915_CACHE_WT : I915_CACHE_NONE);
  3563. if (ret) {
  3564. vma = ERR_PTR(ret);
  3565. goto err_unpin_global;
  3566. }
  3567. /* As the user may map the buffer once pinned in the display plane
  3568. * (e.g. libkms for the bootup splash), we have to ensure that we
  3569. * always use map_and_fenceable for all scanout buffers. However,
  3570. * it may simply be too big to fit into mappable, in which case
  3571. * put it anyway and hope that userspace can cope (but always first
  3572. * try to preserve the existing ABI).
  3573. */
  3574. vma = ERR_PTR(-ENOSPC);
  3575. if ((flags & PIN_MAPPABLE) == 0 &&
  3576. (!view || view->type == I915_GGTT_VIEW_NORMAL))
  3577. vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
  3578. flags |
  3579. PIN_MAPPABLE |
  3580. PIN_NONBLOCK);
  3581. if (IS_ERR(vma))
  3582. vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
  3583. if (IS_ERR(vma))
  3584. goto err_unpin_global;
  3585. vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
  3586. __i915_gem_object_flush_for_display(obj);
  3587. /* It should now be out of any other write domains, and we can update
  3588. * the domain values for our changes.
  3589. */
  3590. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  3591. return vma;
  3592. err_unpin_global:
  3593. obj->pin_global--;
  3594. return vma;
  3595. }
  3596. void
  3597. i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
  3598. {
  3599. lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
  3600. if (WARN_ON(vma->obj->pin_global == 0))
  3601. return;
  3602. if (--vma->obj->pin_global == 0)
  3603. vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
  3604. /* Bump the LRU to try and avoid premature eviction whilst flipping */
  3605. i915_gem_object_bump_inactive_ggtt(vma->obj);
  3606. i915_vma_unpin(vma);
  3607. }
  3608. /**
  3609. * Moves a single object to the CPU read, and possibly write domain.
  3610. * @obj: object to act on
  3611. * @write: requesting write or read-only access
  3612. *
  3613. * This function returns when the move is complete, including waiting on
  3614. * flushes to occur.
  3615. */
  3616. int
  3617. i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  3618. {
  3619. int ret;
  3620. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3621. ret = i915_gem_object_wait(obj,
  3622. I915_WAIT_INTERRUPTIBLE |
  3623. I915_WAIT_LOCKED |
  3624. (write ? I915_WAIT_ALL : 0),
  3625. MAX_SCHEDULE_TIMEOUT,
  3626. NULL);
  3627. if (ret)
  3628. return ret;
  3629. flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  3630. /* Flush the CPU cache if it's still invalid. */
  3631. if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  3632. i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
  3633. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  3634. }
  3635. /* It should now be out of any other write domains, and we can update
  3636. * the domain values for our changes.
  3637. */
  3638. GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
  3639. /* If we're writing through the CPU, then the GPU read domains will
  3640. * need to be invalidated at next use.
  3641. */
  3642. if (write)
  3643. __start_cpu_write(obj);
  3644. return 0;
  3645. }
  3646. /* Throttle our rendering by waiting until the ring has completed our requests
  3647. * emitted over 20 msec ago.
  3648. *
  3649. * Note that if we were to use the current jiffies each time around the loop,
  3650. * we wouldn't escape the function with any frames outstanding if the time to
  3651. * render a frame was over 20ms.
  3652. *
  3653. * This should get us reasonable parallelism between CPU and GPU but also
  3654. * relatively low latency when blocking on a particular request to finish.
  3655. */
  3656. static int
  3657. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  3658. {
  3659. struct drm_i915_private *dev_priv = to_i915(dev);
  3660. struct drm_i915_file_private *file_priv = file->driver_priv;
  3661. unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
  3662. struct i915_request *request, *target = NULL;
  3663. long ret;
  3664. /* ABI: return -EIO if already wedged */
  3665. if (i915_terminally_wedged(&dev_priv->gpu_error))
  3666. return -EIO;
  3667. spin_lock(&file_priv->mm.lock);
  3668. list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
  3669. if (time_after_eq(request->emitted_jiffies, recent_enough))
  3670. break;
  3671. if (target) {
  3672. list_del(&target->client_link);
  3673. target->file_priv = NULL;
  3674. }
  3675. target = request;
  3676. }
  3677. if (target)
  3678. i915_request_get(target);
  3679. spin_unlock(&file_priv->mm.lock);
  3680. if (target == NULL)
  3681. return 0;
  3682. ret = i915_request_wait(target,
  3683. I915_WAIT_INTERRUPTIBLE,
  3684. MAX_SCHEDULE_TIMEOUT);
  3685. i915_request_put(target);
  3686. return ret < 0 ? ret : 0;
  3687. }
  3688. struct i915_vma *
  3689. i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
  3690. const struct i915_ggtt_view *view,
  3691. u64 size,
  3692. u64 alignment,
  3693. u64 flags)
  3694. {
  3695. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  3696. struct i915_address_space *vm = &dev_priv->ggtt.vm;
  3697. struct i915_vma *vma;
  3698. int ret;
  3699. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3700. if (flags & PIN_MAPPABLE &&
  3701. (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
  3702. /* If the required space is larger than the available
  3703. * aperture, we will not able to find a slot for the
  3704. * object and unbinding the object now will be in
  3705. * vain. Worse, doing so may cause us to ping-pong
  3706. * the object in and out of the Global GTT and
  3707. * waste a lot of cycles under the mutex.
  3708. */
  3709. if (obj->base.size > dev_priv->ggtt.mappable_end)
  3710. return ERR_PTR(-E2BIG);
  3711. /* If NONBLOCK is set the caller is optimistically
  3712. * trying to cache the full object within the mappable
  3713. * aperture, and *must* have a fallback in place for
  3714. * situations where we cannot bind the object. We
  3715. * can be a little more lax here and use the fallback
  3716. * more often to avoid costly migrations of ourselves
  3717. * and other objects within the aperture.
  3718. *
  3719. * Half-the-aperture is used as a simple heuristic.
  3720. * More interesting would to do search for a free
  3721. * block prior to making the commitment to unbind.
  3722. * That caters for the self-harm case, and with a
  3723. * little more heuristics (e.g. NOFAULT, NOEVICT)
  3724. * we could try to minimise harm to others.
  3725. */
  3726. if (flags & PIN_NONBLOCK &&
  3727. obj->base.size > dev_priv->ggtt.mappable_end / 2)
  3728. return ERR_PTR(-ENOSPC);
  3729. }
  3730. vma = i915_vma_instance(obj, vm, view);
  3731. if (unlikely(IS_ERR(vma)))
  3732. return vma;
  3733. if (i915_vma_misplaced(vma, size, alignment, flags)) {
  3734. if (flags & PIN_NONBLOCK) {
  3735. if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
  3736. return ERR_PTR(-ENOSPC);
  3737. if (flags & PIN_MAPPABLE &&
  3738. vma->fence_size > dev_priv->ggtt.mappable_end / 2)
  3739. return ERR_PTR(-ENOSPC);
  3740. }
  3741. WARN(i915_vma_is_pinned(vma),
  3742. "bo is already pinned in ggtt with incorrect alignment:"
  3743. " offset=%08x, req.alignment=%llx,"
  3744. " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
  3745. i915_ggtt_offset(vma), alignment,
  3746. !!(flags & PIN_MAPPABLE),
  3747. i915_vma_is_map_and_fenceable(vma));
  3748. ret = i915_vma_unbind(vma);
  3749. if (ret)
  3750. return ERR_PTR(ret);
  3751. }
  3752. ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
  3753. if (ret)
  3754. return ERR_PTR(ret);
  3755. return vma;
  3756. }
  3757. static __always_inline unsigned int __busy_read_flag(unsigned int id)
  3758. {
  3759. /* Note that we could alias engines in the execbuf API, but
  3760. * that would be very unwise as it prevents userspace from
  3761. * fine control over engine selection. Ahem.
  3762. *
  3763. * This should be something like EXEC_MAX_ENGINE instead of
  3764. * I915_NUM_ENGINES.
  3765. */
  3766. BUILD_BUG_ON(I915_NUM_ENGINES > 16);
  3767. return 0x10000 << id;
  3768. }
  3769. static __always_inline unsigned int __busy_write_id(unsigned int id)
  3770. {
  3771. /* The uABI guarantees an active writer is also amongst the read
  3772. * engines. This would be true if we accessed the activity tracking
  3773. * under the lock, but as we perform the lookup of the object and
  3774. * its activity locklessly we can not guarantee that the last_write
  3775. * being active implies that we have set the same engine flag from
  3776. * last_read - hence we always set both read and write busy for
  3777. * last_write.
  3778. */
  3779. return id | __busy_read_flag(id);
  3780. }
  3781. static __always_inline unsigned int
  3782. __busy_set_if_active(const struct dma_fence *fence,
  3783. unsigned int (*flag)(unsigned int id))
  3784. {
  3785. struct i915_request *rq;
  3786. /* We have to check the current hw status of the fence as the uABI
  3787. * guarantees forward progress. We could rely on the idle worker
  3788. * to eventually flush us, but to minimise latency just ask the
  3789. * hardware.
  3790. *
  3791. * Note we only report on the status of native fences.
  3792. */
  3793. if (!dma_fence_is_i915(fence))
  3794. return 0;
  3795. /* opencode to_request() in order to avoid const warnings */
  3796. rq = container_of(fence, struct i915_request, fence);
  3797. if (i915_request_completed(rq))
  3798. return 0;
  3799. return flag(rq->engine->uabi_id);
  3800. }
  3801. static __always_inline unsigned int
  3802. busy_check_reader(const struct dma_fence *fence)
  3803. {
  3804. return __busy_set_if_active(fence, __busy_read_flag);
  3805. }
  3806. static __always_inline unsigned int
  3807. busy_check_writer(const struct dma_fence *fence)
  3808. {
  3809. if (!fence)
  3810. return 0;
  3811. return __busy_set_if_active(fence, __busy_write_id);
  3812. }
  3813. int
  3814. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3815. struct drm_file *file)
  3816. {
  3817. struct drm_i915_gem_busy *args = data;
  3818. struct drm_i915_gem_object *obj;
  3819. struct reservation_object_list *list;
  3820. unsigned int seq;
  3821. int err;
  3822. err = -ENOENT;
  3823. rcu_read_lock();
  3824. obj = i915_gem_object_lookup_rcu(file, args->handle);
  3825. if (!obj)
  3826. goto out;
  3827. /* A discrepancy here is that we do not report the status of
  3828. * non-i915 fences, i.e. even though we may report the object as idle,
  3829. * a call to set-domain may still stall waiting for foreign rendering.
  3830. * This also means that wait-ioctl may report an object as busy,
  3831. * where busy-ioctl considers it idle.
  3832. *
  3833. * We trade the ability to warn of foreign fences to report on which
  3834. * i915 engines are active for the object.
  3835. *
  3836. * Alternatively, we can trade that extra information on read/write
  3837. * activity with
  3838. * args->busy =
  3839. * !reservation_object_test_signaled_rcu(obj->resv, true);
  3840. * to report the overall busyness. This is what the wait-ioctl does.
  3841. *
  3842. */
  3843. retry:
  3844. seq = raw_read_seqcount(&obj->resv->seq);
  3845. /* Translate the exclusive fence to the READ *and* WRITE engine */
  3846. args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
  3847. /* Translate shared fences to READ set of engines */
  3848. list = rcu_dereference(obj->resv->fence);
  3849. if (list) {
  3850. unsigned int shared_count = list->shared_count, i;
  3851. for (i = 0; i < shared_count; ++i) {
  3852. struct dma_fence *fence =
  3853. rcu_dereference(list->shared[i]);
  3854. args->busy |= busy_check_reader(fence);
  3855. }
  3856. }
  3857. if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
  3858. goto retry;
  3859. err = 0;
  3860. out:
  3861. rcu_read_unlock();
  3862. return err;
  3863. }
  3864. int
  3865. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3866. struct drm_file *file_priv)
  3867. {
  3868. return i915_gem_ring_throttle(dev, file_priv);
  3869. }
  3870. int
  3871. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3872. struct drm_file *file_priv)
  3873. {
  3874. struct drm_i915_private *dev_priv = to_i915(dev);
  3875. struct drm_i915_gem_madvise *args = data;
  3876. struct drm_i915_gem_object *obj;
  3877. int err;
  3878. switch (args->madv) {
  3879. case I915_MADV_DONTNEED:
  3880. case I915_MADV_WILLNEED:
  3881. break;
  3882. default:
  3883. return -EINVAL;
  3884. }
  3885. obj = i915_gem_object_lookup(file_priv, args->handle);
  3886. if (!obj)
  3887. return -ENOENT;
  3888. err = mutex_lock_interruptible(&obj->mm.lock);
  3889. if (err)
  3890. goto out;
  3891. if (i915_gem_object_has_pages(obj) &&
  3892. i915_gem_object_is_tiled(obj) &&
  3893. dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
  3894. if (obj->mm.madv == I915_MADV_WILLNEED) {
  3895. GEM_BUG_ON(!obj->mm.quirked);
  3896. __i915_gem_object_unpin_pages(obj);
  3897. obj->mm.quirked = false;
  3898. }
  3899. if (args->madv == I915_MADV_WILLNEED) {
  3900. GEM_BUG_ON(obj->mm.quirked);
  3901. __i915_gem_object_pin_pages(obj);
  3902. obj->mm.quirked = true;
  3903. }
  3904. }
  3905. if (obj->mm.madv != __I915_MADV_PURGED)
  3906. obj->mm.madv = args->madv;
  3907. /* if the object is no longer attached, discard its backing storage */
  3908. if (obj->mm.madv == I915_MADV_DONTNEED &&
  3909. !i915_gem_object_has_pages(obj))
  3910. i915_gem_object_truncate(obj);
  3911. args->retained = obj->mm.madv != __I915_MADV_PURGED;
  3912. mutex_unlock(&obj->mm.lock);
  3913. out:
  3914. i915_gem_object_put(obj);
  3915. return err;
  3916. }
  3917. static void
  3918. frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
  3919. {
  3920. struct drm_i915_gem_object *obj =
  3921. container_of(active, typeof(*obj), frontbuffer_write);
  3922. intel_fb_obj_flush(obj, ORIGIN_CS);
  3923. }
  3924. void i915_gem_object_init(struct drm_i915_gem_object *obj,
  3925. const struct drm_i915_gem_object_ops *ops)
  3926. {
  3927. mutex_init(&obj->mm.lock);
  3928. INIT_LIST_HEAD(&obj->vma_list);
  3929. INIT_LIST_HEAD(&obj->lut_list);
  3930. INIT_LIST_HEAD(&obj->batch_pool_link);
  3931. obj->ops = ops;
  3932. reservation_object_init(&obj->__builtin_resv);
  3933. obj->resv = &obj->__builtin_resv;
  3934. obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
  3935. init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
  3936. obj->mm.madv = I915_MADV_WILLNEED;
  3937. INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
  3938. mutex_init(&obj->mm.get_page.lock);
  3939. i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
  3940. }
  3941. static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
  3942. .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
  3943. I915_GEM_OBJECT_IS_SHRINKABLE,
  3944. .get_pages = i915_gem_object_get_pages_gtt,
  3945. .put_pages = i915_gem_object_put_pages_gtt,
  3946. .pwrite = i915_gem_object_pwrite_gtt,
  3947. };
  3948. static int i915_gem_object_create_shmem(struct drm_device *dev,
  3949. struct drm_gem_object *obj,
  3950. size_t size)
  3951. {
  3952. struct drm_i915_private *i915 = to_i915(dev);
  3953. unsigned long flags = VM_NORESERVE;
  3954. struct file *filp;
  3955. drm_gem_private_object_init(dev, obj, size);
  3956. if (i915->mm.gemfs)
  3957. filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
  3958. flags);
  3959. else
  3960. filp = shmem_file_setup("i915", size, flags);
  3961. if (IS_ERR(filp))
  3962. return PTR_ERR(filp);
  3963. obj->filp = filp;
  3964. return 0;
  3965. }
  3966. struct drm_i915_gem_object *
  3967. i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
  3968. {
  3969. struct drm_i915_gem_object *obj;
  3970. struct address_space *mapping;
  3971. unsigned int cache_level;
  3972. gfp_t mask;
  3973. int ret;
  3974. /* There is a prevalence of the assumption that we fit the object's
  3975. * page count inside a 32bit _signed_ variable. Let's document this and
  3976. * catch if we ever need to fix it. In the meantime, if you do spot
  3977. * such a local variable, please consider fixing!
  3978. */
  3979. if (size >> PAGE_SHIFT > INT_MAX)
  3980. return ERR_PTR(-E2BIG);
  3981. if (overflows_type(size, obj->base.size))
  3982. return ERR_PTR(-E2BIG);
  3983. obj = i915_gem_object_alloc(dev_priv);
  3984. if (obj == NULL)
  3985. return ERR_PTR(-ENOMEM);
  3986. ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
  3987. if (ret)
  3988. goto fail;
  3989. mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
  3990. if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
  3991. /* 965gm cannot relocate objects above 4GiB. */
  3992. mask &= ~__GFP_HIGHMEM;
  3993. mask |= __GFP_DMA32;
  3994. }
  3995. mapping = obj->base.filp->f_mapping;
  3996. mapping_set_gfp_mask(mapping, mask);
  3997. GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
  3998. i915_gem_object_init(obj, &i915_gem_object_ops);
  3999. obj->write_domain = I915_GEM_DOMAIN_CPU;
  4000. obj->read_domains = I915_GEM_DOMAIN_CPU;
  4001. if (HAS_LLC(dev_priv))
  4002. /* On some devices, we can have the GPU use the LLC (the CPU
  4003. * cache) for about a 10% performance improvement
  4004. * compared to uncached. Graphics requests other than
  4005. * display scanout are coherent with the CPU in
  4006. * accessing this cache. This means in this mode we
  4007. * don't need to clflush on the CPU side, and on the
  4008. * GPU side we only need to flush internal caches to
  4009. * get data visible to the CPU.
  4010. *
  4011. * However, we maintain the display planes as UC, and so
  4012. * need to rebind when first used as such.
  4013. */
  4014. cache_level = I915_CACHE_LLC;
  4015. else
  4016. cache_level = I915_CACHE_NONE;
  4017. i915_gem_object_set_cache_coherency(obj, cache_level);
  4018. trace_i915_gem_object_create(obj);
  4019. return obj;
  4020. fail:
  4021. i915_gem_object_free(obj);
  4022. return ERR_PTR(ret);
  4023. }
  4024. static bool discard_backing_storage(struct drm_i915_gem_object *obj)
  4025. {
  4026. /* If we are the last user of the backing storage (be it shmemfs
  4027. * pages or stolen etc), we know that the pages are going to be
  4028. * immediately released. In this case, we can then skip copying
  4029. * back the contents from the GPU.
  4030. */
  4031. if (obj->mm.madv != I915_MADV_WILLNEED)
  4032. return false;
  4033. if (obj->base.filp == NULL)
  4034. return true;
  4035. /* At first glance, this looks racy, but then again so would be
  4036. * userspace racing mmap against close. However, the first external
  4037. * reference to the filp can only be obtained through the
  4038. * i915_gem_mmap_ioctl() which safeguards us against the user
  4039. * acquiring such a reference whilst we are in the middle of
  4040. * freeing the object.
  4041. */
  4042. return atomic_long_read(&obj->base.filp->f_count) == 1;
  4043. }
  4044. static void __i915_gem_free_objects(struct drm_i915_private *i915,
  4045. struct llist_node *freed)
  4046. {
  4047. struct drm_i915_gem_object *obj, *on;
  4048. intel_runtime_pm_get(i915);
  4049. llist_for_each_entry_safe(obj, on, freed, freed) {
  4050. struct i915_vma *vma, *vn;
  4051. trace_i915_gem_object_destroy(obj);
  4052. mutex_lock(&i915->drm.struct_mutex);
  4053. GEM_BUG_ON(i915_gem_object_is_active(obj));
  4054. list_for_each_entry_safe(vma, vn,
  4055. &obj->vma_list, obj_link) {
  4056. GEM_BUG_ON(i915_vma_is_active(vma));
  4057. vma->flags &= ~I915_VMA_PIN_MASK;
  4058. i915_vma_destroy(vma);
  4059. }
  4060. GEM_BUG_ON(!list_empty(&obj->vma_list));
  4061. GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
  4062. /* This serializes freeing with the shrinker. Since the free
  4063. * is delayed, first by RCU then by the workqueue, we want the
  4064. * shrinker to be able to free pages of unreferenced objects,
  4065. * or else we may oom whilst there are plenty of deferred
  4066. * freed objects.
  4067. */
  4068. if (i915_gem_object_has_pages(obj)) {
  4069. spin_lock(&i915->mm.obj_lock);
  4070. list_del_init(&obj->mm.link);
  4071. spin_unlock(&i915->mm.obj_lock);
  4072. }
  4073. mutex_unlock(&i915->drm.struct_mutex);
  4074. GEM_BUG_ON(obj->bind_count);
  4075. GEM_BUG_ON(obj->userfault_count);
  4076. GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
  4077. GEM_BUG_ON(!list_empty(&obj->lut_list));
  4078. if (obj->ops->release)
  4079. obj->ops->release(obj);
  4080. if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
  4081. atomic_set(&obj->mm.pages_pin_count, 0);
  4082. __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
  4083. GEM_BUG_ON(i915_gem_object_has_pages(obj));
  4084. if (obj->base.import_attach)
  4085. drm_prime_gem_destroy(&obj->base, NULL);
  4086. reservation_object_fini(&obj->__builtin_resv);
  4087. drm_gem_object_release(&obj->base);
  4088. i915_gem_info_remove_obj(i915, obj->base.size);
  4089. kfree(obj->bit_17);
  4090. i915_gem_object_free(obj);
  4091. GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
  4092. atomic_dec(&i915->mm.free_count);
  4093. if (on)
  4094. cond_resched();
  4095. }
  4096. intel_runtime_pm_put(i915);
  4097. }
  4098. static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
  4099. {
  4100. struct llist_node *freed;
  4101. /* Free the oldest, most stale object to keep the free_list short */
  4102. freed = NULL;
  4103. if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
  4104. /* Only one consumer of llist_del_first() allowed */
  4105. spin_lock(&i915->mm.free_lock);
  4106. freed = llist_del_first(&i915->mm.free_list);
  4107. spin_unlock(&i915->mm.free_lock);
  4108. }
  4109. if (unlikely(freed)) {
  4110. freed->next = NULL;
  4111. __i915_gem_free_objects(i915, freed);
  4112. }
  4113. }
  4114. static void __i915_gem_free_work(struct work_struct *work)
  4115. {
  4116. struct drm_i915_private *i915 =
  4117. container_of(work, struct drm_i915_private, mm.free_work);
  4118. struct llist_node *freed;
  4119. /*
  4120. * All file-owned VMA should have been released by this point through
  4121. * i915_gem_close_object(), or earlier by i915_gem_context_close().
  4122. * However, the object may also be bound into the global GTT (e.g.
  4123. * older GPUs without per-process support, or for direct access through
  4124. * the GTT either for the user or for scanout). Those VMA still need to
  4125. * unbound now.
  4126. */
  4127. spin_lock(&i915->mm.free_lock);
  4128. while ((freed = llist_del_all(&i915->mm.free_list))) {
  4129. spin_unlock(&i915->mm.free_lock);
  4130. __i915_gem_free_objects(i915, freed);
  4131. if (need_resched())
  4132. return;
  4133. spin_lock(&i915->mm.free_lock);
  4134. }
  4135. spin_unlock(&i915->mm.free_lock);
  4136. }
  4137. static void __i915_gem_free_object_rcu(struct rcu_head *head)
  4138. {
  4139. struct drm_i915_gem_object *obj =
  4140. container_of(head, typeof(*obj), rcu);
  4141. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  4142. /*
  4143. * Since we require blocking on struct_mutex to unbind the freed
  4144. * object from the GPU before releasing resources back to the
  4145. * system, we can not do that directly from the RCU callback (which may
  4146. * be a softirq context), but must instead then defer that work onto a
  4147. * kthread. We use the RCU callback rather than move the freed object
  4148. * directly onto the work queue so that we can mix between using the
  4149. * worker and performing frees directly from subsequent allocations for
  4150. * crude but effective memory throttling.
  4151. */
  4152. if (llist_add(&obj->freed, &i915->mm.free_list))
  4153. queue_work(i915->wq, &i915->mm.free_work);
  4154. }
  4155. void i915_gem_free_object(struct drm_gem_object *gem_obj)
  4156. {
  4157. struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  4158. if (obj->mm.quirked)
  4159. __i915_gem_object_unpin_pages(obj);
  4160. if (discard_backing_storage(obj))
  4161. obj->mm.madv = I915_MADV_DONTNEED;
  4162. /*
  4163. * Before we free the object, make sure any pure RCU-only
  4164. * read-side critical sections are complete, e.g.
  4165. * i915_gem_busy_ioctl(). For the corresponding synchronized
  4166. * lookup see i915_gem_object_lookup_rcu().
  4167. */
  4168. atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
  4169. call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
  4170. }
  4171. void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
  4172. {
  4173. lockdep_assert_held(&obj->base.dev->struct_mutex);
  4174. if (!i915_gem_object_has_active_reference(obj) &&
  4175. i915_gem_object_is_active(obj))
  4176. i915_gem_object_set_active_reference(obj);
  4177. else
  4178. i915_gem_object_put(obj);
  4179. }
  4180. void i915_gem_sanitize(struct drm_i915_private *i915)
  4181. {
  4182. int err;
  4183. GEM_TRACE("\n");
  4184. mutex_lock(&i915->drm.struct_mutex);
  4185. intel_runtime_pm_get(i915);
  4186. intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
  4187. /*
  4188. * As we have just resumed the machine and woken the device up from
  4189. * deep PCI sleep (presumably D3_cold), assume the HW has been reset
  4190. * back to defaults, recovering from whatever wedged state we left it
  4191. * in and so worth trying to use the device once more.
  4192. */
  4193. if (i915_terminally_wedged(&i915->gpu_error))
  4194. i915_gem_unset_wedged(i915);
  4195. /*
  4196. * If we inherit context state from the BIOS or earlier occupants
  4197. * of the GPU, the GPU may be in an inconsistent state when we
  4198. * try to take over. The only way to remove the earlier state
  4199. * is by resetting. However, resetting on earlier gen is tricky as
  4200. * it may impact the display and we are uncertain about the stability
  4201. * of the reset, so this could be applied to even earlier gen.
  4202. */
  4203. err = -ENODEV;
  4204. if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
  4205. err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
  4206. if (!err)
  4207. intel_engines_sanitize(i915);
  4208. intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
  4209. intel_runtime_pm_put(i915);
  4210. i915_gem_contexts_lost(i915);
  4211. mutex_unlock(&i915->drm.struct_mutex);
  4212. }
  4213. int i915_gem_suspend(struct drm_i915_private *i915)
  4214. {
  4215. int ret;
  4216. GEM_TRACE("\n");
  4217. intel_runtime_pm_get(i915);
  4218. intel_suspend_gt_powersave(i915);
  4219. mutex_lock(&i915->drm.struct_mutex);
  4220. /*
  4221. * We have to flush all the executing contexts to main memory so
  4222. * that they can saved in the hibernation image. To ensure the last
  4223. * context image is coherent, we have to switch away from it. That
  4224. * leaves the i915->kernel_context still active when
  4225. * we actually suspend, and its image in memory may not match the GPU
  4226. * state. Fortunately, the kernel_context is disposable and we do
  4227. * not rely on its state.
  4228. */
  4229. if (!i915_terminally_wedged(&i915->gpu_error)) {
  4230. ret = i915_gem_switch_to_kernel_context(i915);
  4231. if (ret)
  4232. goto err_unlock;
  4233. ret = i915_gem_wait_for_idle(i915,
  4234. I915_WAIT_INTERRUPTIBLE |
  4235. I915_WAIT_LOCKED |
  4236. I915_WAIT_FOR_IDLE_BOOST,
  4237. MAX_SCHEDULE_TIMEOUT);
  4238. if (ret && ret != -EIO)
  4239. goto err_unlock;
  4240. assert_kernel_context_is_current(i915);
  4241. }
  4242. i915_retire_requests(i915); /* ensure we flush after wedging */
  4243. mutex_unlock(&i915->drm.struct_mutex);
  4244. intel_uc_suspend(i915);
  4245. cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
  4246. cancel_delayed_work_sync(&i915->gt.retire_work);
  4247. /*
  4248. * As the idle_work is rearming if it detects a race, play safe and
  4249. * repeat the flush until it is definitely idle.
  4250. */
  4251. drain_delayed_work(&i915->gt.idle_work);
  4252. /*
  4253. * Assert that we successfully flushed all the work and
  4254. * reset the GPU back to its idle, low power state.
  4255. */
  4256. WARN_ON(i915->gt.awake);
  4257. if (WARN_ON(!intel_engines_are_idle(i915)))
  4258. i915_gem_set_wedged(i915); /* no hope, discard everything */
  4259. intel_runtime_pm_put(i915);
  4260. return 0;
  4261. err_unlock:
  4262. mutex_unlock(&i915->drm.struct_mutex);
  4263. intel_runtime_pm_put(i915);
  4264. return ret;
  4265. }
  4266. void i915_gem_suspend_late(struct drm_i915_private *i915)
  4267. {
  4268. struct drm_i915_gem_object *obj;
  4269. struct list_head *phases[] = {
  4270. &i915->mm.unbound_list,
  4271. &i915->mm.bound_list,
  4272. NULL
  4273. }, **phase;
  4274. /*
  4275. * Neither the BIOS, ourselves or any other kernel
  4276. * expects the system to be in execlists mode on startup,
  4277. * so we need to reset the GPU back to legacy mode. And the only
  4278. * known way to disable logical contexts is through a GPU reset.
  4279. *
  4280. * So in order to leave the system in a known default configuration,
  4281. * always reset the GPU upon unload and suspend. Afterwards we then
  4282. * clean up the GEM state tracking, flushing off the requests and
  4283. * leaving the system in a known idle state.
  4284. *
  4285. * Note that is of the upmost importance that the GPU is idle and
  4286. * all stray writes are flushed *before* we dismantle the backing
  4287. * storage for the pinned objects.
  4288. *
  4289. * However, since we are uncertain that resetting the GPU on older
  4290. * machines is a good idea, we don't - just in case it leaves the
  4291. * machine in an unusable condition.
  4292. */
  4293. mutex_lock(&i915->drm.struct_mutex);
  4294. for (phase = phases; *phase; phase++) {
  4295. list_for_each_entry(obj, *phase, mm.link)
  4296. WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
  4297. }
  4298. mutex_unlock(&i915->drm.struct_mutex);
  4299. intel_uc_sanitize(i915);
  4300. i915_gem_sanitize(i915);
  4301. }
  4302. void i915_gem_resume(struct drm_i915_private *i915)
  4303. {
  4304. GEM_TRACE("\n");
  4305. WARN_ON(i915->gt.awake);
  4306. mutex_lock(&i915->drm.struct_mutex);
  4307. intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
  4308. i915_gem_restore_gtt_mappings(i915);
  4309. i915_gem_restore_fences(i915);
  4310. /*
  4311. * As we didn't flush the kernel context before suspend, we cannot
  4312. * guarantee that the context image is complete. So let's just reset
  4313. * it and start again.
  4314. */
  4315. i915->gt.resume(i915);
  4316. if (i915_gem_init_hw(i915))
  4317. goto err_wedged;
  4318. intel_uc_resume(i915);
  4319. /* Always reload a context for powersaving. */
  4320. if (i915_gem_switch_to_kernel_context(i915))
  4321. goto err_wedged;
  4322. out_unlock:
  4323. intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
  4324. mutex_unlock(&i915->drm.struct_mutex);
  4325. return;
  4326. err_wedged:
  4327. if (!i915_terminally_wedged(&i915->gpu_error)) {
  4328. DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
  4329. i915_gem_set_wedged(i915);
  4330. }
  4331. goto out_unlock;
  4332. }
  4333. void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
  4334. {
  4335. if (INTEL_GEN(dev_priv) < 5 ||
  4336. dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
  4337. return;
  4338. I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
  4339. DISP_TILE_SURFACE_SWIZZLING);
  4340. if (IS_GEN5(dev_priv))
  4341. return;
  4342. I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
  4343. if (IS_GEN6(dev_priv))
  4344. I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
  4345. else if (IS_GEN7(dev_priv))
  4346. I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
  4347. else if (IS_GEN8(dev_priv))
  4348. I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
  4349. else
  4350. BUG();
  4351. }
  4352. static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
  4353. {
  4354. I915_WRITE(RING_CTL(base), 0);
  4355. I915_WRITE(RING_HEAD(base), 0);
  4356. I915_WRITE(RING_TAIL(base), 0);
  4357. I915_WRITE(RING_START(base), 0);
  4358. }
  4359. static void init_unused_rings(struct drm_i915_private *dev_priv)
  4360. {
  4361. if (IS_I830(dev_priv)) {
  4362. init_unused_ring(dev_priv, PRB1_BASE);
  4363. init_unused_ring(dev_priv, SRB0_BASE);
  4364. init_unused_ring(dev_priv, SRB1_BASE);
  4365. init_unused_ring(dev_priv, SRB2_BASE);
  4366. init_unused_ring(dev_priv, SRB3_BASE);
  4367. } else if (IS_GEN2(dev_priv)) {
  4368. init_unused_ring(dev_priv, SRB0_BASE);
  4369. init_unused_ring(dev_priv, SRB1_BASE);
  4370. } else if (IS_GEN3(dev_priv)) {
  4371. init_unused_ring(dev_priv, PRB1_BASE);
  4372. init_unused_ring(dev_priv, PRB2_BASE);
  4373. }
  4374. }
  4375. static int __i915_gem_restart_engines(void *data)
  4376. {
  4377. struct drm_i915_private *i915 = data;
  4378. struct intel_engine_cs *engine;
  4379. enum intel_engine_id id;
  4380. int err;
  4381. for_each_engine(engine, i915, id) {
  4382. err = engine->init_hw(engine);
  4383. if (err) {
  4384. DRM_ERROR("Failed to restart %s (%d)\n",
  4385. engine->name, err);
  4386. return err;
  4387. }
  4388. }
  4389. return 0;
  4390. }
  4391. int i915_gem_init_hw(struct drm_i915_private *dev_priv)
  4392. {
  4393. int ret;
  4394. dev_priv->gt.last_init_time = ktime_get();
  4395. /* Double layer security blanket, see i915_gem_init() */
  4396. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  4397. if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
  4398. I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  4399. if (IS_HASWELL(dev_priv))
  4400. I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
  4401. LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
  4402. if (HAS_PCH_NOP(dev_priv)) {
  4403. if (IS_IVYBRIDGE(dev_priv)) {
  4404. u32 temp = I915_READ(GEN7_MSG_CTL);
  4405. temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
  4406. I915_WRITE(GEN7_MSG_CTL, temp);
  4407. } else if (INTEL_GEN(dev_priv) >= 7) {
  4408. u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
  4409. temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
  4410. I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
  4411. }
  4412. }
  4413. intel_gt_workarounds_apply(dev_priv);
  4414. i915_gem_init_swizzling(dev_priv);
  4415. /*
  4416. * At least 830 can leave some of the unused rings
  4417. * "active" (ie. head != tail) after resume which
  4418. * will prevent c3 entry. Makes sure all unused rings
  4419. * are totally idle.
  4420. */
  4421. init_unused_rings(dev_priv);
  4422. BUG_ON(!dev_priv->kernel_context);
  4423. if (i915_terminally_wedged(&dev_priv->gpu_error)) {
  4424. ret = -EIO;
  4425. goto out;
  4426. }
  4427. ret = i915_ppgtt_init_hw(dev_priv);
  4428. if (ret) {
  4429. DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
  4430. goto out;
  4431. }
  4432. ret = intel_wopcm_init_hw(&dev_priv->wopcm);
  4433. if (ret) {
  4434. DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
  4435. goto out;
  4436. }
  4437. /* We can't enable contexts until all firmware is loaded */
  4438. ret = intel_uc_init_hw(dev_priv);
  4439. if (ret) {
  4440. DRM_ERROR("Enabling uc failed (%d)\n", ret);
  4441. goto out;
  4442. }
  4443. intel_mocs_init_l3cc_table(dev_priv);
  4444. /* Only when the HW is re-initialised, can we replay the requests */
  4445. ret = __i915_gem_restart_engines(dev_priv);
  4446. if (ret)
  4447. goto cleanup_uc;
  4448. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4449. return 0;
  4450. cleanup_uc:
  4451. intel_uc_fini_hw(dev_priv);
  4452. out:
  4453. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4454. return ret;
  4455. }
  4456. static int __intel_engines_record_defaults(struct drm_i915_private *i915)
  4457. {
  4458. struct i915_gem_context *ctx;
  4459. struct intel_engine_cs *engine;
  4460. enum intel_engine_id id;
  4461. int err;
  4462. /*
  4463. * As we reset the gpu during very early sanitisation, the current
  4464. * register state on the GPU should reflect its defaults values.
  4465. * We load a context onto the hw (with restore-inhibit), then switch
  4466. * over to a second context to save that default register state. We
  4467. * can then prime every new context with that state so they all start
  4468. * from the same default HW values.
  4469. */
  4470. ctx = i915_gem_context_create_kernel(i915, 0);
  4471. if (IS_ERR(ctx))
  4472. return PTR_ERR(ctx);
  4473. for_each_engine(engine, i915, id) {
  4474. struct i915_request *rq;
  4475. rq = i915_request_alloc(engine, ctx);
  4476. if (IS_ERR(rq)) {
  4477. err = PTR_ERR(rq);
  4478. goto out_ctx;
  4479. }
  4480. err = 0;
  4481. if (engine->init_context)
  4482. err = engine->init_context(rq);
  4483. i915_request_add(rq);
  4484. if (err)
  4485. goto err_active;
  4486. }
  4487. err = i915_gem_switch_to_kernel_context(i915);
  4488. if (err)
  4489. goto err_active;
  4490. if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
  4491. i915_gem_set_wedged(i915);
  4492. err = -EIO; /* Caller will declare us wedged */
  4493. goto err_active;
  4494. }
  4495. assert_kernel_context_is_current(i915);
  4496. for_each_engine(engine, i915, id) {
  4497. struct i915_vma *state;
  4498. state = to_intel_context(ctx, engine)->state;
  4499. if (!state)
  4500. continue;
  4501. /*
  4502. * As we will hold a reference to the logical state, it will
  4503. * not be torn down with the context, and importantly the
  4504. * object will hold onto its vma (making it possible for a
  4505. * stray GTT write to corrupt our defaults). Unmap the vma
  4506. * from the GTT to prevent such accidents and reclaim the
  4507. * space.
  4508. */
  4509. err = i915_vma_unbind(state);
  4510. if (err)
  4511. goto err_active;
  4512. err = i915_gem_object_set_to_cpu_domain(state->obj, false);
  4513. if (err)
  4514. goto err_active;
  4515. engine->default_state = i915_gem_object_get(state->obj);
  4516. }
  4517. if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
  4518. unsigned int found = intel_engines_has_context_isolation(i915);
  4519. /*
  4520. * Make sure that classes with multiple engine instances all
  4521. * share the same basic configuration.
  4522. */
  4523. for_each_engine(engine, i915, id) {
  4524. unsigned int bit = BIT(engine->uabi_class);
  4525. unsigned int expected = engine->default_state ? bit : 0;
  4526. if ((found & bit) != expected) {
  4527. DRM_ERROR("mismatching default context state for class %d on engine %s\n",
  4528. engine->uabi_class, engine->name);
  4529. }
  4530. }
  4531. }
  4532. out_ctx:
  4533. i915_gem_context_set_closed(ctx);
  4534. i915_gem_context_put(ctx);
  4535. return err;
  4536. err_active:
  4537. /*
  4538. * If we have to abandon now, we expect the engines to be idle
  4539. * and ready to be torn-down. First try to flush any remaining
  4540. * request, ensure we are pointing at the kernel context and
  4541. * then remove it.
  4542. */
  4543. if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
  4544. goto out_ctx;
  4545. if (WARN_ON(i915_gem_wait_for_idle(i915,
  4546. I915_WAIT_LOCKED,
  4547. MAX_SCHEDULE_TIMEOUT)))
  4548. goto out_ctx;
  4549. i915_gem_contexts_lost(i915);
  4550. goto out_ctx;
  4551. }
  4552. int i915_gem_init(struct drm_i915_private *dev_priv)
  4553. {
  4554. int ret;
  4555. /* We need to fallback to 4K pages if host doesn't support huge gtt. */
  4556. if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
  4557. mkwrite_device_info(dev_priv)->page_sizes =
  4558. I915_GTT_PAGE_SIZE_4K;
  4559. dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
  4560. if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
  4561. dev_priv->gt.resume = intel_lr_context_resume;
  4562. dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
  4563. } else {
  4564. dev_priv->gt.resume = intel_legacy_submission_resume;
  4565. dev_priv->gt.cleanup_engine = intel_engine_cleanup;
  4566. }
  4567. ret = i915_gem_init_userptr(dev_priv);
  4568. if (ret)
  4569. return ret;
  4570. ret = intel_uc_init_misc(dev_priv);
  4571. if (ret)
  4572. return ret;
  4573. ret = intel_wopcm_init(&dev_priv->wopcm);
  4574. if (ret)
  4575. goto err_uc_misc;
  4576. /* This is just a security blanket to placate dragons.
  4577. * On some systems, we very sporadically observe that the first TLBs
  4578. * used by the CS may be stale, despite us poking the TLB reset. If
  4579. * we hold the forcewake during initialisation these problems
  4580. * just magically go away.
  4581. */
  4582. mutex_lock(&dev_priv->drm.struct_mutex);
  4583. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  4584. ret = i915_gem_init_ggtt(dev_priv);
  4585. if (ret) {
  4586. GEM_BUG_ON(ret == -EIO);
  4587. goto err_unlock;
  4588. }
  4589. ret = i915_gem_contexts_init(dev_priv);
  4590. if (ret) {
  4591. GEM_BUG_ON(ret == -EIO);
  4592. goto err_ggtt;
  4593. }
  4594. ret = intel_engines_init(dev_priv);
  4595. if (ret) {
  4596. GEM_BUG_ON(ret == -EIO);
  4597. goto err_context;
  4598. }
  4599. intel_init_gt_powersave(dev_priv);
  4600. ret = intel_uc_init(dev_priv);
  4601. if (ret)
  4602. goto err_pm;
  4603. ret = i915_gem_init_hw(dev_priv);
  4604. if (ret)
  4605. goto err_uc_init;
  4606. /*
  4607. * Despite its name intel_init_clock_gating applies both display
  4608. * clock gating workarounds; GT mmio workarounds and the occasional
  4609. * GT power context workaround. Worse, sometimes it includes a context
  4610. * register workaround which we need to apply before we record the
  4611. * default HW state for all contexts.
  4612. *
  4613. * FIXME: break up the workarounds and apply them at the right time!
  4614. */
  4615. intel_init_clock_gating(dev_priv);
  4616. ret = __intel_engines_record_defaults(dev_priv);
  4617. if (ret)
  4618. goto err_init_hw;
  4619. if (i915_inject_load_failure()) {
  4620. ret = -ENODEV;
  4621. goto err_init_hw;
  4622. }
  4623. if (i915_inject_load_failure()) {
  4624. ret = -EIO;
  4625. goto err_init_hw;
  4626. }
  4627. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4628. mutex_unlock(&dev_priv->drm.struct_mutex);
  4629. return 0;
  4630. /*
  4631. * Unwinding is complicated by that we want to handle -EIO to mean
  4632. * disable GPU submission but keep KMS alive. We want to mark the
  4633. * HW as irrevisibly wedged, but keep enough state around that the
  4634. * driver doesn't explode during runtime.
  4635. */
  4636. err_init_hw:
  4637. mutex_unlock(&dev_priv->drm.struct_mutex);
  4638. WARN_ON(i915_gem_suspend(dev_priv));
  4639. i915_gem_suspend_late(dev_priv);
  4640. i915_gem_drain_workqueue(dev_priv);
  4641. mutex_lock(&dev_priv->drm.struct_mutex);
  4642. intel_uc_fini_hw(dev_priv);
  4643. err_uc_init:
  4644. intel_uc_fini(dev_priv);
  4645. err_pm:
  4646. if (ret != -EIO) {
  4647. intel_cleanup_gt_powersave(dev_priv);
  4648. i915_gem_cleanup_engines(dev_priv);
  4649. }
  4650. err_context:
  4651. if (ret != -EIO)
  4652. i915_gem_contexts_fini(dev_priv);
  4653. err_ggtt:
  4654. err_unlock:
  4655. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4656. mutex_unlock(&dev_priv->drm.struct_mutex);
  4657. err_uc_misc:
  4658. intel_uc_fini_misc(dev_priv);
  4659. if (ret != -EIO)
  4660. i915_gem_cleanup_userptr(dev_priv);
  4661. if (ret == -EIO) {
  4662. /*
  4663. * Allow engine initialisation to fail by marking the GPU as
  4664. * wedged. But we only want to do this where the GPU is angry,
  4665. * for all other failure, such as an allocation failure, bail.
  4666. */
  4667. if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
  4668. i915_load_error(dev_priv,
  4669. "Failed to initialize GPU, declaring it wedged!\n");
  4670. i915_gem_set_wedged(dev_priv);
  4671. }
  4672. ret = 0;
  4673. }
  4674. i915_gem_drain_freed_objects(dev_priv);
  4675. return ret;
  4676. }
  4677. void i915_gem_fini(struct drm_i915_private *dev_priv)
  4678. {
  4679. i915_gem_suspend_late(dev_priv);
  4680. /* Flush any outstanding unpin_work. */
  4681. i915_gem_drain_workqueue(dev_priv);
  4682. mutex_lock(&dev_priv->drm.struct_mutex);
  4683. intel_uc_fini_hw(dev_priv);
  4684. intel_uc_fini(dev_priv);
  4685. i915_gem_cleanup_engines(dev_priv);
  4686. i915_gem_contexts_fini(dev_priv);
  4687. mutex_unlock(&dev_priv->drm.struct_mutex);
  4688. intel_uc_fini_misc(dev_priv);
  4689. i915_gem_cleanup_userptr(dev_priv);
  4690. i915_gem_drain_freed_objects(dev_priv);
  4691. WARN_ON(!list_empty(&dev_priv->contexts.list));
  4692. }
  4693. void i915_gem_init_mmio(struct drm_i915_private *i915)
  4694. {
  4695. i915_gem_sanitize(i915);
  4696. }
  4697. void
  4698. i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
  4699. {
  4700. struct intel_engine_cs *engine;
  4701. enum intel_engine_id id;
  4702. for_each_engine(engine, dev_priv, id)
  4703. dev_priv->gt.cleanup_engine(engine);
  4704. }
  4705. void
  4706. i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
  4707. {
  4708. int i;
  4709. if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
  4710. !IS_CHERRYVIEW(dev_priv))
  4711. dev_priv->num_fence_regs = 32;
  4712. else if (INTEL_GEN(dev_priv) >= 4 ||
  4713. IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
  4714. IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
  4715. dev_priv->num_fence_regs = 16;
  4716. else
  4717. dev_priv->num_fence_regs = 8;
  4718. if (intel_vgpu_active(dev_priv))
  4719. dev_priv->num_fence_regs =
  4720. I915_READ(vgtif_reg(avail_rs.fence_num));
  4721. /* Initialize fence registers to zero */
  4722. for (i = 0; i < dev_priv->num_fence_regs; i++) {
  4723. struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
  4724. fence->i915 = dev_priv;
  4725. fence->id = i;
  4726. list_add_tail(&fence->link, &dev_priv->mm.fence_list);
  4727. }
  4728. i915_gem_restore_fences(dev_priv);
  4729. i915_gem_detect_bit_6_swizzle(dev_priv);
  4730. }
  4731. static void i915_gem_init__mm(struct drm_i915_private *i915)
  4732. {
  4733. spin_lock_init(&i915->mm.object_stat_lock);
  4734. spin_lock_init(&i915->mm.obj_lock);
  4735. spin_lock_init(&i915->mm.free_lock);
  4736. init_llist_head(&i915->mm.free_list);
  4737. INIT_LIST_HEAD(&i915->mm.unbound_list);
  4738. INIT_LIST_HEAD(&i915->mm.bound_list);
  4739. INIT_LIST_HEAD(&i915->mm.fence_list);
  4740. INIT_LIST_HEAD(&i915->mm.userfault_list);
  4741. INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
  4742. }
  4743. int i915_gem_init_early(struct drm_i915_private *dev_priv)
  4744. {
  4745. int err = -ENOMEM;
  4746. dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
  4747. if (!dev_priv->objects)
  4748. goto err_out;
  4749. dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
  4750. if (!dev_priv->vmas)
  4751. goto err_objects;
  4752. dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
  4753. if (!dev_priv->luts)
  4754. goto err_vmas;
  4755. dev_priv->requests = KMEM_CACHE(i915_request,
  4756. SLAB_HWCACHE_ALIGN |
  4757. SLAB_RECLAIM_ACCOUNT |
  4758. SLAB_TYPESAFE_BY_RCU);
  4759. if (!dev_priv->requests)
  4760. goto err_luts;
  4761. dev_priv->dependencies = KMEM_CACHE(i915_dependency,
  4762. SLAB_HWCACHE_ALIGN |
  4763. SLAB_RECLAIM_ACCOUNT);
  4764. if (!dev_priv->dependencies)
  4765. goto err_requests;
  4766. dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
  4767. if (!dev_priv->priorities)
  4768. goto err_dependencies;
  4769. INIT_LIST_HEAD(&dev_priv->gt.timelines);
  4770. INIT_LIST_HEAD(&dev_priv->gt.active_rings);
  4771. INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
  4772. i915_gem_init__mm(dev_priv);
  4773. INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
  4774. i915_gem_retire_work_handler);
  4775. INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
  4776. i915_gem_idle_work_handler);
  4777. init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
  4778. init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  4779. atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
  4780. spin_lock_init(&dev_priv->fb_tracking.lock);
  4781. err = i915_gemfs_init(dev_priv);
  4782. if (err)
  4783. DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
  4784. return 0;
  4785. err_dependencies:
  4786. kmem_cache_destroy(dev_priv->dependencies);
  4787. err_requests:
  4788. kmem_cache_destroy(dev_priv->requests);
  4789. err_luts:
  4790. kmem_cache_destroy(dev_priv->luts);
  4791. err_vmas:
  4792. kmem_cache_destroy(dev_priv->vmas);
  4793. err_objects:
  4794. kmem_cache_destroy(dev_priv->objects);
  4795. err_out:
  4796. return err;
  4797. }
  4798. void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
  4799. {
  4800. i915_gem_drain_freed_objects(dev_priv);
  4801. GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
  4802. GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
  4803. WARN_ON(dev_priv->mm.object_count);
  4804. WARN_ON(!list_empty(&dev_priv->gt.timelines));
  4805. kmem_cache_destroy(dev_priv->priorities);
  4806. kmem_cache_destroy(dev_priv->dependencies);
  4807. kmem_cache_destroy(dev_priv->requests);
  4808. kmem_cache_destroy(dev_priv->luts);
  4809. kmem_cache_destroy(dev_priv->vmas);
  4810. kmem_cache_destroy(dev_priv->objects);
  4811. /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
  4812. rcu_barrier();
  4813. i915_gemfs_fini(dev_priv);
  4814. }
  4815. int i915_gem_freeze(struct drm_i915_private *dev_priv)
  4816. {
  4817. /* Discard all purgeable objects, let userspace recover those as
  4818. * required after resuming.
  4819. */
  4820. i915_gem_shrink_all(dev_priv);
  4821. return 0;
  4822. }
  4823. int i915_gem_freeze_late(struct drm_i915_private *i915)
  4824. {
  4825. struct drm_i915_gem_object *obj;
  4826. struct list_head *phases[] = {
  4827. &i915->mm.unbound_list,
  4828. &i915->mm.bound_list,
  4829. NULL
  4830. }, **phase;
  4831. /*
  4832. * Called just before we write the hibernation image.
  4833. *
  4834. * We need to update the domain tracking to reflect that the CPU
  4835. * will be accessing all the pages to create and restore from the
  4836. * hibernation, and so upon restoration those pages will be in the
  4837. * CPU domain.
  4838. *
  4839. * To make sure the hibernation image contains the latest state,
  4840. * we update that state just before writing out the image.
  4841. *
  4842. * To try and reduce the hibernation image, we manually shrink
  4843. * the objects as well, see i915_gem_freeze()
  4844. */
  4845. i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
  4846. i915_gem_drain_freed_objects(i915);
  4847. mutex_lock(&i915->drm.struct_mutex);
  4848. for (phase = phases; *phase; phase++) {
  4849. list_for_each_entry(obj, *phase, mm.link)
  4850. WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
  4851. }
  4852. mutex_unlock(&i915->drm.struct_mutex);
  4853. return 0;
  4854. }
  4855. void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  4856. {
  4857. struct drm_i915_file_private *file_priv = file->driver_priv;
  4858. struct i915_request *request;
  4859. /* Clean up our request list when the client is going away, so that
  4860. * later retire_requests won't dereference our soon-to-be-gone
  4861. * file_priv.
  4862. */
  4863. spin_lock(&file_priv->mm.lock);
  4864. list_for_each_entry(request, &file_priv->mm.request_list, client_link)
  4865. request->file_priv = NULL;
  4866. spin_unlock(&file_priv->mm.lock);
  4867. }
  4868. int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
  4869. {
  4870. struct drm_i915_file_private *file_priv;
  4871. int ret;
  4872. DRM_DEBUG("\n");
  4873. file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
  4874. if (!file_priv)
  4875. return -ENOMEM;
  4876. file->driver_priv = file_priv;
  4877. file_priv->dev_priv = i915;
  4878. file_priv->file = file;
  4879. spin_lock_init(&file_priv->mm.lock);
  4880. INIT_LIST_HEAD(&file_priv->mm.request_list);
  4881. file_priv->bsd_engine = -1;
  4882. file_priv->hang_timestamp = jiffies;
  4883. ret = i915_gem_context_open(i915, file);
  4884. if (ret)
  4885. kfree(file_priv);
  4886. return ret;
  4887. }
  4888. /**
  4889. * i915_gem_track_fb - update frontbuffer tracking
  4890. * @old: current GEM buffer for the frontbuffer slots
  4891. * @new: new GEM buffer for the frontbuffer slots
  4892. * @frontbuffer_bits: bitmask of frontbuffer slots
  4893. *
  4894. * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
  4895. * from @old and setting them in @new. Both @old and @new can be NULL.
  4896. */
  4897. void i915_gem_track_fb(struct drm_i915_gem_object *old,
  4898. struct drm_i915_gem_object *new,
  4899. unsigned frontbuffer_bits)
  4900. {
  4901. /* Control of individual bits within the mask are guarded by
  4902. * the owning plane->mutex, i.e. we can never see concurrent
  4903. * manipulation of individual bits. But since the bitfield as a whole
  4904. * is updated using RMW, we need to use atomics in order to update
  4905. * the bits.
  4906. */
  4907. BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
  4908. sizeof(atomic_t) * BITS_PER_BYTE);
  4909. if (old) {
  4910. WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
  4911. atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
  4912. }
  4913. if (new) {
  4914. WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
  4915. atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
  4916. }
  4917. }
  4918. /* Allocate a new GEM object and fill it with the supplied data */
  4919. struct drm_i915_gem_object *
  4920. i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
  4921. const void *data, size_t size)
  4922. {
  4923. struct drm_i915_gem_object *obj;
  4924. struct file *file;
  4925. size_t offset;
  4926. int err;
  4927. obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
  4928. if (IS_ERR(obj))
  4929. return obj;
  4930. GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
  4931. file = obj->base.filp;
  4932. offset = 0;
  4933. do {
  4934. unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
  4935. struct page *page;
  4936. void *pgdata, *vaddr;
  4937. err = pagecache_write_begin(file, file->f_mapping,
  4938. offset, len, 0,
  4939. &page, &pgdata);
  4940. if (err < 0)
  4941. goto fail;
  4942. vaddr = kmap(page);
  4943. memcpy(vaddr, data, len);
  4944. kunmap(page);
  4945. err = pagecache_write_end(file, file->f_mapping,
  4946. offset, len, len,
  4947. page, pgdata);
  4948. if (err < 0)
  4949. goto fail;
  4950. size -= len;
  4951. data += len;
  4952. offset += len;
  4953. } while (size);
  4954. return obj;
  4955. fail:
  4956. i915_gem_object_put(obj);
  4957. return ERR_PTR(err);
  4958. }
  4959. struct scatterlist *
  4960. i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
  4961. unsigned int n,
  4962. unsigned int *offset)
  4963. {
  4964. struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
  4965. struct scatterlist *sg;
  4966. unsigned int idx, count;
  4967. might_sleep();
  4968. GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
  4969. GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
  4970. /* As we iterate forward through the sg, we record each entry in a
  4971. * radixtree for quick repeated (backwards) lookups. If we have seen
  4972. * this index previously, we will have an entry for it.
  4973. *
  4974. * Initial lookup is O(N), but this is amortized to O(1) for
  4975. * sequential page access (where each new request is consecutive
  4976. * to the previous one). Repeated lookups are O(lg(obj->base.size)),
  4977. * i.e. O(1) with a large constant!
  4978. */
  4979. if (n < READ_ONCE(iter->sg_idx))
  4980. goto lookup;
  4981. mutex_lock(&iter->lock);
  4982. /* We prefer to reuse the last sg so that repeated lookup of this
  4983. * (or the subsequent) sg are fast - comparing against the last
  4984. * sg is faster than going through the radixtree.
  4985. */
  4986. sg = iter->sg_pos;
  4987. idx = iter->sg_idx;
  4988. count = __sg_page_count(sg);
  4989. while (idx + count <= n) {
  4990. unsigned long exception, i;
  4991. int ret;
  4992. /* If we cannot allocate and insert this entry, or the
  4993. * individual pages from this range, cancel updating the
  4994. * sg_idx so that on this lookup we are forced to linearly
  4995. * scan onwards, but on future lookups we will try the
  4996. * insertion again (in which case we need to be careful of
  4997. * the error return reporting that we have already inserted
  4998. * this index).
  4999. */
  5000. ret = radix_tree_insert(&iter->radix, idx, sg);
  5001. if (ret && ret != -EEXIST)
  5002. goto scan;
  5003. exception =
  5004. RADIX_TREE_EXCEPTIONAL_ENTRY |
  5005. idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
  5006. for (i = 1; i < count; i++) {
  5007. ret = radix_tree_insert(&iter->radix, idx + i,
  5008. (void *)exception);
  5009. if (ret && ret != -EEXIST)
  5010. goto scan;
  5011. }
  5012. idx += count;
  5013. sg = ____sg_next(sg);
  5014. count = __sg_page_count(sg);
  5015. }
  5016. scan:
  5017. iter->sg_pos = sg;
  5018. iter->sg_idx = idx;
  5019. mutex_unlock(&iter->lock);
  5020. if (unlikely(n < idx)) /* insertion completed by another thread */
  5021. goto lookup;
  5022. /* In case we failed to insert the entry into the radixtree, we need
  5023. * to look beyond the current sg.
  5024. */
  5025. while (idx + count <= n) {
  5026. idx += count;
  5027. sg = ____sg_next(sg);
  5028. count = __sg_page_count(sg);
  5029. }
  5030. *offset = n - idx;
  5031. return sg;
  5032. lookup:
  5033. rcu_read_lock();
  5034. sg = radix_tree_lookup(&iter->radix, n);
  5035. GEM_BUG_ON(!sg);
  5036. /* If this index is in the middle of multi-page sg entry,
  5037. * the radixtree will contain an exceptional entry that points
  5038. * to the start of that range. We will return the pointer to
  5039. * the base page and the offset of this page within the
  5040. * sg entry's range.
  5041. */
  5042. *offset = 0;
  5043. if (unlikely(radix_tree_exception(sg))) {
  5044. unsigned long base =
  5045. (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
  5046. sg = radix_tree_lookup(&iter->radix, base);
  5047. GEM_BUG_ON(!sg);
  5048. *offset = n - base;
  5049. }
  5050. rcu_read_unlock();
  5051. return sg;
  5052. }
  5053. struct page *
  5054. i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
  5055. {
  5056. struct scatterlist *sg;
  5057. unsigned int offset;
  5058. GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
  5059. sg = i915_gem_object_get_sg(obj, n, &offset);
  5060. return nth_page(sg_page(sg), offset);
  5061. }
  5062. /* Like i915_gem_object_get_page(), but mark the returned page dirty */
  5063. struct page *
  5064. i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
  5065. unsigned int n)
  5066. {
  5067. struct page *page;
  5068. page = i915_gem_object_get_page(obj, n);
  5069. if (!obj->mm.dirty)
  5070. set_page_dirty(page);
  5071. return page;
  5072. }
  5073. dma_addr_t
  5074. i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
  5075. unsigned long n)
  5076. {
  5077. struct scatterlist *sg;
  5078. unsigned int offset;
  5079. sg = i915_gem_object_get_sg(obj, n, &offset);
  5080. return sg_dma_address(sg) + (offset << PAGE_SHIFT);
  5081. }
  5082. int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
  5083. {
  5084. struct sg_table *pages;
  5085. int err;
  5086. if (align > obj->base.size)
  5087. return -EINVAL;
  5088. if (obj->ops == &i915_gem_phys_ops)
  5089. return 0;
  5090. if (obj->ops != &i915_gem_object_ops)
  5091. return -EINVAL;
  5092. err = i915_gem_object_unbind(obj);
  5093. if (err)
  5094. return err;
  5095. mutex_lock(&obj->mm.lock);
  5096. if (obj->mm.madv != I915_MADV_WILLNEED) {
  5097. err = -EFAULT;
  5098. goto err_unlock;
  5099. }
  5100. if (obj->mm.quirked) {
  5101. err = -EFAULT;
  5102. goto err_unlock;
  5103. }
  5104. if (obj->mm.mapping) {
  5105. err = -EBUSY;
  5106. goto err_unlock;
  5107. }
  5108. pages = __i915_gem_object_unset_pages(obj);
  5109. obj->ops = &i915_gem_phys_ops;
  5110. err = ____i915_gem_object_get_pages(obj);
  5111. if (err)
  5112. goto err_xfer;
  5113. /* Perma-pin (until release) the physical set of pages */
  5114. __i915_gem_object_pin_pages(obj);
  5115. if (!IS_ERR_OR_NULL(pages))
  5116. i915_gem_object_ops.put_pages(obj, pages);
  5117. mutex_unlock(&obj->mm.lock);
  5118. return 0;
  5119. err_xfer:
  5120. obj->ops = &i915_gem_object_ops;
  5121. if (!IS_ERR_OR_NULL(pages)) {
  5122. unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
  5123. __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
  5124. }
  5125. err_unlock:
  5126. mutex_unlock(&obj->mm.lock);
  5127. return err;
  5128. }
  5129. #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
  5130. #include "selftests/scatterlist.c"
  5131. #include "selftests/mock_gem_device.c"
  5132. #include "selftests/huge_gem_object.c"
  5133. #include "selftests/huge_pages.c"
  5134. #include "selftests/i915_gem_object.c"
  5135. #include "selftests/i915_gem_coherency.c"
  5136. #endif