i915_gem.c

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
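
/*
 * Usage sketch for the two helpers above: the slow GTT pread/pwrite paths
 * later in this file reserve a single page-sized node in the mappable
 * aperture with insert_mappable_node(), bind each page of the object into it
 * in turn, and release it again with remove_mappable_node() once the copy is
 * done (see i915_gem_gtt_pread() below).
 */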
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
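
/*
 * Illustrative sketch only (example_track_object is not a helper the driver
 * defines): the bookkeeping pair above is meant to be called symmetrically,
 * once when an object becomes tracked and once when it is torn down, so that
 * mm.object_count and mm.object_memory always describe the live set.
 */
static inline void example_track_object(struct drm_i915_private *dev_priv,
					u64 size, bool add)
{
	if (add)
		i915_gem_info_add_obj(dev_priv, size);    /* object now tracked */
	else
		i915_gem_info_remove_obj(dev_priv, size); /* object going away */
}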
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
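
/*
 * Worked example of the return-value mapping above, following the
 * wait_event_interruptible_timeout() convention: 0 means the timeout expired
 * with the reset still pending, so we give up with -EIO; a negative value
 * (-ERESTARTSYS) means the wait was interrupted by a signal and is returned
 * as-is; any positive value means the reset completed with time to spare,
 * which is reported as success (0).
 */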
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

static u32 __i915_gem_park(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);
	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));

	if (!i915->gt.awake)
		return I915_EPOCH_INVALID;

	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);

	/*
	 * Be paranoid and flush a concurrent interrupt to make sure
	 * we don't reactivate any irq tasklets after parking.
	 *
	 * FIXME: Note that even though we have waited for execlists to be idle,
	 * there may still be an in-flight interrupt even though the CSB
	 * is now empty. synchronize_irq() makes sure that a residual interrupt
	 * is completed before we continue, but it doesn't prevent the HW from
	 * raising a spurious interrupt later. To complete the shield we should
	 * coordinate disabling the CS irq with flushing the interrupts.
	 */
	synchronize_irq(i915->drm.irq);

	intel_engines_park(i915);
	i915_timelines_park(i915);

	i915_pmu_gt_parked(i915);
	i915_vma_parked(i915);

	i915->gt.awake = false;

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);

	intel_runtime_pm_put(i915);

	return i915->gt.epoch;
}

void i915_gem_park(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);

	if (!i915->gt.awake)
		return;

	/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
	mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
}

void i915_gem_unpark(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->gt.active_requests);

	if (i915->gt.awake)
		return;

	intel_runtime_pm_get_noresume(i915);

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);

	i915->gt.awake = true;
	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
		i915->gt.epoch = 1;

	intel_enable_gt_powersave(i915);
	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);
	i915_pmu_gt_unparked(i915);

	intel_engines_unpark(i915);

	i915_queue_hangcheck(i915);

	queue_delayed_work(i915->wq,
			   &i915->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
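
/*
 * Userspace-side sketch (illustrative only, not part of this file): how a
 * client might consume the ioctl above. Assumes an open DRM fd on an i915
 * device; error handling elided.
 *
 *	struct drm_i915_gem_get_aperture aper = {};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
 *		printf("GGTT total %llu bytes, %llu available\n",
 *		       (unsigned long long)aper.aper_size,
 *		       (unsigned long long)aper.aper_available_size);
 */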
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret)
		return ret;

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps_client)
{
	struct i915_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_request_completed(rq))
		goto out;

	/*
	 * This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps_client && !i915_request_started(rq)) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps_client);
	}

	timeout = i915_request_wait(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
		i915_request_retire_upto(rq);

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps_client)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps_client);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must all be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);

	dma_fence_put(excl);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

static void __fence_set_priority(struct dma_fence *fence,
				 const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
		engine->schedule(rq, attr);
	rcu_read_unlock();
	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], attr);
	} else {
		__fence_set_priority(fence, attr);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], attr);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, attr);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps_client: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps_client)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps_client);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps_client;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}
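
/*
 * Worked example for the pitch/size computation above: a 1920x1080 dumb
 * buffer at 32bpp gives pitch = ALIGN(1920 * 4, 64) = 7680 bytes (already a
 * multiple of 64) and size = 7680 * 1080 = 8294400 bytes, which
 * i915_gem_create() then rounds up to whole pages (a no-op here, since
 * 8294400 is exactly 2025 pages of 4096 bytes).
 */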
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}
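
/*
 * Userspace-side sketch (illustrative only): the ioctl above is reached via
 * DRM_IOCTL_I915_GEM_CREATE. Assumes an open DRM fd; use_handle() is a
 * hypothetical consumer and error handling is elided.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */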
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */
	wmb();

	intel_runtime_pm_get(dev_priv);
	spin_lock_irq(&dev_priv->uncore.lock);

	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

	spin_unlock_irq(&dev_priv->uncore.lock);
	intel_runtime_pm_put(dev_priv);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;

	if (!(obj->write_domain & flush_domains))
		return;

	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		i915_gem_flush_ggtt_writes(dev_priv);

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));

		for_each_ggtt_vma(vma, obj) {
			if (vma->iomap)
				continue;

			i915_vma_unset_ggtt_write(vma);
		}
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->write_domain = 0;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
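
/*
 * Worked example of the swizzle arithmetic above: the copy is chopped at
 * 64-byte cacheline boundaries (cacheline_end = ALIGN(gpu_offset + 1, 64))
 * and each chunk is accessed at gpu_offset ^ 64, i.e. in the other cacheline
 * of its 128-byte pair. Offsets therefore map 0 -> 64, 64 -> 0, 128 -> 192,
 * 192 -> 128, and so on, compensating for the bit-17-dependent swizzling the
 * GPU applies to these pages.
 */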
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
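
/*
 * Worked example for the swizzled flush above: flushing 16 bytes starting at
 * offset 112 of a 128-byte block gives start = round_down(112, 128) = 0 and
 * end = round_up(128, 128) = 128, so the whole 128-byte block (both 64-byte
 * channels) is flushed rather than only the touched cacheline.
 */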
  856. /* Only difference to the fast-path function is that this can handle bit17
  857. * and uses non-atomic copy and kmap functions. */
  858. static int
  859. shmem_pread_slow(struct page *page, int offset, int length,
  860. char __user *user_data,
  861. bool page_do_bit17_swizzling, bool needs_clflush)
  862. {
  863. char *vaddr;
  864. int ret;
  865. vaddr = kmap(page);
  866. if (needs_clflush)
  867. shmem_clflush_swizzled_range(vaddr + offset, length,
  868. page_do_bit17_swizzling);
  869. if (page_do_bit17_swizzling)
  870. ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
  871. else
  872. ret = __copy_to_user(user_data, vaddr + offset, length);
  873. kunmap(page);
  874. return ret ? - EFAULT : 0;
  875. }
  876. static int
  877. shmem_pread(struct page *page, int offset, int length, char __user *user_data,
  878. bool page_do_bit17_swizzling, bool needs_clflush)
  879. {
  880. int ret;
  881. ret = -ENODEV;
  882. if (!page_do_bit17_swizzling) {
  883. char *vaddr = kmap_atomic(page);
  884. if (needs_clflush)
  885. drm_clflush_virt_range(vaddr + offset, length);
  886. ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
  887. kunmap_atomic(vaddr);
  888. }
  889. if (ret == 0)
  890. return 0;
  891. return shmem_pread_slow(page, offset, length, user_data,
  892. page_do_bit17_swizzling, needs_clflush);
  893. }
  894. static int
  895. i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
  896. struct drm_i915_gem_pread *args)
  897. {
  898. char __user *user_data;
  899. u64 remain;
  900. unsigned int obj_do_bit17_swizzling;
  901. unsigned int needs_clflush;
  902. unsigned int idx, offset;
  903. int ret;
  904. obj_do_bit17_swizzling = 0;
  905. if (i915_gem_object_needs_bit17_swizzle(obj))
  906. obj_do_bit17_swizzling = BIT(17);
  907. ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
  908. if (ret)
  909. return ret;
  910. ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
  911. mutex_unlock(&obj->base.dev->struct_mutex);
  912. if (ret)
  913. return ret;
  914. remain = args->size;
  915. user_data = u64_to_user_ptr(args->data_ptr);
  916. offset = offset_in_page(args->offset);
  917. for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
  918. struct page *page = i915_gem_object_get_page(obj, idx);
  919. int length;
  920. length = remain;
  921. if (offset + length > PAGE_SIZE)
  922. length = PAGE_SIZE - offset;
  923. ret = shmem_pread(page, offset, length, user_data,
  924. page_to_phys(page) & obj_do_bit17_swizzling,
  925. needs_clflush);
  926. if (ret)
  927. break;
  928. remain -= length;
  929. user_data += length;
  930. offset = 0;
  931. }
  932. i915_gem_obj_finish_shmem_access(obj);
  933. return ret;
  934. }
  935. static inline bool
  936. gtt_user_read(struct io_mapping *mapping,
  937. loff_t base, int offset,
  938. char __user *user_data, int length)
  939. {
  940. void __iomem *vaddr;
  941. unsigned long unwritten;
  942. /* We can use the cpu mem copy function because this is X86. */
  943. vaddr = io_mapping_map_atomic_wc(mapping, base);
  944. unwritten = __copy_to_user_inatomic(user_data,
  945. (void __force *)vaddr + offset,
  946. length);
  947. io_mapping_unmap_atomic(vaddr);
  948. if (unwritten) {
  949. vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
  950. unwritten = copy_to_user(user_data,
  951. (void __force *)vaddr + offset,
  952. length);
  953. io_mapping_unmap(vaddr);
  954. }
  955. return unwritten;
  956. }
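/*
 * Fallback pread path through the GGTT, used when the shmem path is
 * unavailable or faults (see the fallback in i915_gem_pread_ioctl() below).
 * The object is pinned into the mappable aperture when possible; otherwise a
 * single page of GGTT address space is reserved and each object page is bound
 * into it in turn before being copied out through the uncached iomap.
 */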
  957. static int
  958. i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
  959. const struct drm_i915_gem_pread *args)
  960. {
  961. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  962. struct i915_ggtt *ggtt = &i915->ggtt;
  963. struct drm_mm_node node;
  964. struct i915_vma *vma;
  965. void __user *user_data;
  966. u64 remain, offset;
  967. int ret;
  968. ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  969. if (ret)
  970. return ret;
  971. intel_runtime_pm_get(i915);
  972. vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
  973. PIN_MAPPABLE |
  974. PIN_NONFAULT |
  975. PIN_NONBLOCK);
  976. if (!IS_ERR(vma)) {
  977. node.start = i915_ggtt_offset(vma);
  978. node.allocated = false;
  979. ret = i915_vma_put_fence(vma);
  980. if (ret) {
  981. i915_vma_unpin(vma);
  982. vma = ERR_PTR(ret);
  983. }
  984. }
  985. if (IS_ERR(vma)) {
  986. ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
  987. if (ret)
  988. goto out_unlock;
  989. GEM_BUG_ON(!node.allocated);
  990. }
  991. ret = i915_gem_object_set_to_gtt_domain(obj, false);
  992. if (ret)
  993. goto out_unpin;
  994. mutex_unlock(&i915->drm.struct_mutex);
  995. user_data = u64_to_user_ptr(args->data_ptr);
  996. remain = args->size;
  997. offset = args->offset;
  998. while (remain > 0) {
  999. /* Operation in this page
  1000. *
  1001. * page_base = page offset within aperture
  1002. * page_offset = offset within page
  1003. * page_length = bytes to copy for this page
  1004. */
  1005. u32 page_base = node.start;
  1006. unsigned page_offset = offset_in_page(offset);
  1007. unsigned page_length = PAGE_SIZE - page_offset;
  1008. page_length = remain < page_length ? remain : page_length;
  1009. if (node.allocated) {
  1010. wmb();
  1011. ggtt->base.insert_page(&ggtt->base,
  1012. i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
  1013. node.start, I915_CACHE_NONE, 0);
  1014. wmb();
  1015. } else {
  1016. page_base += offset & PAGE_MASK;
  1017. }
  1018. if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
  1019. user_data, page_length)) {
  1020. ret = -EFAULT;
  1021. break;
  1022. }
  1023. remain -= page_length;
  1024. user_data += page_length;
  1025. offset += page_length;
  1026. }
  1027. mutex_lock(&i915->drm.struct_mutex);
  1028. out_unpin:
  1029. if (node.allocated) {
  1030. wmb();
  1031. ggtt->base.clear_range(&ggtt->base,
  1032. node.start, node.size);
  1033. remove_mappable_node(&node);
  1034. } else {
  1035. i915_vma_unpin(vma);
  1036. }
  1037. out_unlock:
  1038. intel_runtime_pm_put(i915);
  1039. mutex_unlock(&i915->drm.struct_mutex);
  1040. return ret;
  1041. }
  1042. /**
  1043. * Reads data from the object referenced by handle.
  1044. * @dev: drm device pointer
  1045. * @data: ioctl data blob
  1046. * @file: drm file pointer
  1047. *
  1048. * On error, the contents of *data are undefined.
  1049. */
  1050. int
  1051. i915_gem_pread_ioctl(struct drm_device *dev, void *data,
  1052. struct drm_file *file)
  1053. {
  1054. struct drm_i915_gem_pread *args = data;
  1055. struct drm_i915_gem_object *obj;
  1056. int ret;
  1057. if (args->size == 0)
  1058. return 0;
  1059. if (!access_ok(VERIFY_WRITE,
  1060. u64_to_user_ptr(args->data_ptr),
  1061. args->size))
  1062. return -EFAULT;
  1063. obj = i915_gem_object_lookup(file, args->handle);
  1064. if (!obj)
  1065. return -ENOENT;
  1066. /* Bounds check source. */
  1067. if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
  1068. ret = -EINVAL;
  1069. goto out;
  1070. }
  1071. trace_i915_gem_object_pread(obj, args->offset, args->size);
  1072. ret = i915_gem_object_wait(obj,
  1073. I915_WAIT_INTERRUPTIBLE,
  1074. MAX_SCHEDULE_TIMEOUT,
  1075. to_rps_client(file));
  1076. if (ret)
  1077. goto out;
  1078. ret = i915_gem_object_pin_pages(obj);
  1079. if (ret)
  1080. goto out;
  1081. ret = i915_gem_shmem_pread(obj, args);
  1082. if (ret == -EFAULT || ret == -ENODEV)
  1083. ret = i915_gem_gtt_pread(obj, args);
  1084. i915_gem_object_unpin_pages(obj);
  1085. out:
  1086. i915_gem_object_put(obj);
  1087. return ret;
  1088. }
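/*
 * Illustrative userspace sketch (not part of the driver): this ioctl is
 * normally reached through libdrm's drmIoctl() with the uapi struct
 * drm_i915_gem_pread; the handle is assumed to come from an earlier
 * GEM_CREATE, and error handling is elided.
 *
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = length,
 *		.data_ptr = (__u64)(uintptr_t)buf,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */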
  1089. /* This is the fast write path which cannot handle
  1090. * page faults in the source data
  1091. */
  1092. static inline bool
  1093. ggtt_write(struct io_mapping *mapping,
  1094. loff_t base, int offset,
  1095. char __user *user_data, int length)
  1096. {
  1097. void __iomem *vaddr;
  1098. unsigned long unwritten;
  1099. /* We can use the cpu mem copy function because this is X86. */
  1100. vaddr = io_mapping_map_atomic_wc(mapping, base);
  1101. unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
  1102. user_data, length);
  1103. io_mapping_unmap_atomic(vaddr);
  1104. if (unwritten) {
  1105. vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
  1106. unwritten = copy_from_user((void __force *)vaddr + offset,
  1107. user_data, length);
  1108. io_mapping_unmap(vaddr);
  1109. }
  1110. return unwritten;
  1111. }
  1112. /**
  1113. * This is the fast pwrite path, where we copy the data directly from the
  1114. * user into the GTT, uncached.
  1115. * @obj: i915 GEM object
  1116. * @args: pwrite arguments structure
  1117. */
  1118. static int
  1119. i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
  1120. const struct drm_i915_gem_pwrite *args)
  1121. {
  1122. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  1123. struct i915_ggtt *ggtt = &i915->ggtt;
  1124. struct drm_mm_node node;
  1125. struct i915_vma *vma;
  1126. u64 remain, offset;
  1127. void __user *user_data;
  1128. int ret;
  1129. ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  1130. if (ret)
  1131. return ret;
  1132. if (i915_gem_object_has_struct_page(obj)) {
  1133. /*
  1134. * Avoid waking the device up if we can fallback, as
  1135. * waking/resuming is very slow (worst-case 10-100 ms
  1136. * depending on PCI sleeps and our own resume time).
  1137. * This easily dwarfs any performance advantage from
  1138. * using the cache bypass of indirect GGTT access.
  1139. */
  1140. if (!intel_runtime_pm_get_if_in_use(i915)) {
  1141. ret = -EFAULT;
  1142. goto out_unlock;
  1143. }
  1144. } else {
  1145. /* No backing pages, no fallback, we must force GGTT access */
  1146. intel_runtime_pm_get(i915);
  1147. }
  1148. vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
  1149. PIN_MAPPABLE |
  1150. PIN_NONFAULT |
  1151. PIN_NONBLOCK);
  1152. if (!IS_ERR(vma)) {
  1153. node.start = i915_ggtt_offset(vma);
  1154. node.allocated = false;
  1155. ret = i915_vma_put_fence(vma);
  1156. if (ret) {
  1157. i915_vma_unpin(vma);
  1158. vma = ERR_PTR(ret);
  1159. }
  1160. }
  1161. if (IS_ERR(vma)) {
  1162. ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
  1163. if (ret)
  1164. goto out_rpm;
  1165. GEM_BUG_ON(!node.allocated);
  1166. }
  1167. ret = i915_gem_object_set_to_gtt_domain(obj, true);
  1168. if (ret)
  1169. goto out_unpin;
  1170. mutex_unlock(&i915->drm.struct_mutex);
  1171. intel_fb_obj_invalidate(obj, ORIGIN_CPU);
  1172. user_data = u64_to_user_ptr(args->data_ptr);
  1173. offset = args->offset;
  1174. remain = args->size;
  1175. while (remain) {
  1176. /* Operation in this page
  1177. *
  1178. * page_base = page offset within aperture
  1179. * page_offset = offset within page
  1180. * page_length = bytes to copy for this page
  1181. */
  1182. u32 page_base = node.start;
  1183. unsigned int page_offset = offset_in_page(offset);
  1184. unsigned int page_length = PAGE_SIZE - page_offset;
  1185. page_length = remain < page_length ? remain : page_length;
  1186. if (node.allocated) {
  1187. wmb(); /* flush the write before we modify the GGTT */
  1188. ggtt->base.insert_page(&ggtt->base,
  1189. i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
  1190. node.start, I915_CACHE_NONE, 0);
  1191. wmb(); /* flush modifications to the GGTT (insert_page) */
  1192. } else {
  1193. page_base += offset & PAGE_MASK;
  1194. }
  1195. /* If we get a fault while copying data, then (presumably) our
  1196. * source page isn't available. Return the error and we'll
  1197. * retry in the slow path.
* If the object is non-shmem backed, we retry with the
* path that handles page faults.
  1200. */
  1201. if (ggtt_write(&ggtt->iomap, page_base, page_offset,
  1202. user_data, page_length)) {
  1203. ret = -EFAULT;
  1204. break;
  1205. }
  1206. remain -= page_length;
  1207. user_data += page_length;
  1208. offset += page_length;
  1209. }
  1210. intel_fb_obj_flush(obj, ORIGIN_CPU);
  1211. mutex_lock(&i915->drm.struct_mutex);
  1212. out_unpin:
  1213. if (node.allocated) {
  1214. wmb();
  1215. ggtt->base.clear_range(&ggtt->base,
  1216. node.start, node.size);
  1217. remove_mappable_node(&node);
  1218. } else {
  1219. i915_vma_unpin(vma);
  1220. }
  1221. out_rpm:
  1222. intel_runtime_pm_put(i915);
  1223. out_unlock:
  1224. mutex_unlock(&i915->drm.struct_mutex);
  1225. return ret;
  1226. }
  1227. static int
  1228. shmem_pwrite_slow(struct page *page, int offset, int length,
  1229. char __user *user_data,
  1230. bool page_do_bit17_swizzling,
  1231. bool needs_clflush_before,
  1232. bool needs_clflush_after)
  1233. {
  1234. char *vaddr;
  1235. int ret;
  1236. vaddr = kmap(page);
  1237. if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
  1238. shmem_clflush_swizzled_range(vaddr + offset, length,
  1239. page_do_bit17_swizzling);
  1240. if (page_do_bit17_swizzling)
  1241. ret = __copy_from_user_swizzled(vaddr, offset, user_data,
  1242. length);
  1243. else
  1244. ret = __copy_from_user(vaddr + offset, user_data, length);
  1245. if (needs_clflush_after)
  1246. shmem_clflush_swizzled_range(vaddr + offset, length,
  1247. page_do_bit17_swizzling);
  1248. kunmap(page);
  1249. return ret ? -EFAULT : 0;
  1250. }
  1251. /* Per-page copy function for the shmem pwrite fastpath.
  1252. * Flushes invalid cachelines before writing to the target if
  1253. * needs_clflush_before is set and flushes out any written cachelines after
* writing if needs_clflush_after is set.
  1255. */
  1256. static int
  1257. shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
  1258. bool page_do_bit17_swizzling,
  1259. bool needs_clflush_before,
  1260. bool needs_clflush_after)
  1261. {
  1262. int ret;
  1263. ret = -ENODEV;
  1264. if (!page_do_bit17_swizzling) {
  1265. char *vaddr = kmap_atomic(page);
  1266. if (needs_clflush_before)
  1267. drm_clflush_virt_range(vaddr + offset, len);
  1268. ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
  1269. if (needs_clflush_after)
  1270. drm_clflush_virt_range(vaddr + offset, len);
  1271. kunmap_atomic(vaddr);
  1272. }
  1273. if (ret == 0)
  1274. return ret;
  1275. return shmem_pwrite_slow(page, offset, len, user_data,
  1276. page_do_bit17_swizzling,
  1277. needs_clflush_before,
  1278. needs_clflush_after);
  1279. }
  1280. static int
  1281. i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
  1282. const struct drm_i915_gem_pwrite *args)
  1283. {
  1284. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  1285. void __user *user_data;
  1286. u64 remain;
  1287. unsigned int obj_do_bit17_swizzling;
  1288. unsigned int partial_cacheline_write;
  1289. unsigned int needs_clflush;
  1290. unsigned int offset, idx;
  1291. int ret;
  1292. ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
  1293. if (ret)
  1294. return ret;
  1295. ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
  1296. mutex_unlock(&i915->drm.struct_mutex);
  1297. if (ret)
  1298. return ret;
  1299. obj_do_bit17_swizzling = 0;
  1300. if (i915_gem_object_needs_bit17_swizzle(obj))
  1301. obj_do_bit17_swizzling = BIT(17);
/* If we don't overwrite a cacheline completely we need to be
 * careful to have up-to-date data by first clflushing. Don't
 * overcomplicate things; just flush the entire extent of the write.
 */
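/* partial_cacheline_write is used as a mask: x86_clflush_size is a power of
 * two, so (offset | length) & mask below is non-zero whenever a copy does not
 * both start and end on a cacheline boundary (e.g. 63 for a 64 byte line).
 */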
  1306. partial_cacheline_write = 0;
  1307. if (needs_clflush & CLFLUSH_BEFORE)
  1308. partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
  1309. user_data = u64_to_user_ptr(args->data_ptr);
  1310. remain = args->size;
  1311. offset = offset_in_page(args->offset);
  1312. for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
  1313. struct page *page = i915_gem_object_get_page(obj, idx);
  1314. int length;
  1315. length = remain;
  1316. if (offset + length > PAGE_SIZE)
  1317. length = PAGE_SIZE - offset;
  1318. ret = shmem_pwrite(page, offset, length, user_data,
  1319. page_to_phys(page) & obj_do_bit17_swizzling,
  1320. (offset | length) & partial_cacheline_write,
  1321. needs_clflush & CLFLUSH_AFTER);
  1322. if (ret)
  1323. break;
  1324. remain -= length;
  1325. user_data += length;
  1326. offset = 0;
  1327. }
  1328. intel_fb_obj_flush(obj, ORIGIN_CPU);
  1329. i915_gem_obj_finish_shmem_access(obj);
  1330. return ret;
  1331. }
  1332. /**
  1333. * Writes data to the object referenced by handle.
  1334. * @dev: drm device
  1335. * @data: ioctl data blob
  1336. * @file: drm file
  1337. *
  1338. * On error, the contents of the buffer that were to be modified are undefined.
  1339. */
  1340. int
  1341. i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  1342. struct drm_file *file)
  1343. {
  1344. struct drm_i915_gem_pwrite *args = data;
  1345. struct drm_i915_gem_object *obj;
  1346. int ret;
  1347. if (args->size == 0)
  1348. return 0;
  1349. if (!access_ok(VERIFY_READ,
  1350. u64_to_user_ptr(args->data_ptr),
  1351. args->size))
  1352. return -EFAULT;
  1353. obj = i915_gem_object_lookup(file, args->handle);
  1354. if (!obj)
  1355. return -ENOENT;
  1356. /* Bounds check destination. */
  1357. if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
  1358. ret = -EINVAL;
  1359. goto err;
  1360. }
  1361. trace_i915_gem_object_pwrite(obj, args->offset, args->size);
  1362. ret = -ENODEV;
  1363. if (obj->ops->pwrite)
  1364. ret = obj->ops->pwrite(obj, args);
  1365. if (ret != -ENODEV)
  1366. goto err;
  1367. ret = i915_gem_object_wait(obj,
  1368. I915_WAIT_INTERRUPTIBLE |
  1369. I915_WAIT_ALL,
  1370. MAX_SCHEDULE_TIMEOUT,
  1371. to_rps_client(file));
  1372. if (ret)
  1373. goto err;
  1374. ret = i915_gem_object_pin_pages(obj);
  1375. if (ret)
  1376. goto err;
  1377. ret = -EFAULT;
  1378. /* We can only do the GTT pwrite on untiled buffers, as otherwise
  1379. * it would end up going through the fenced access, and we'll get
  1380. * different detiling behavior between reading and writing.
  1381. * pread/pwrite currently are reading and writing from the CPU
  1382. * perspective, requiring manual detiling by the client.
  1383. */
  1384. if (!i915_gem_object_has_struct_page(obj) ||
  1385. cpu_write_needs_clflush(obj))
  1386. /* Note that the gtt paths might fail with non-page-backed user
  1387. * pointers (e.g. gtt mappings when moving data between
  1388. * textures). Fallback to the shmem path in that case.
  1389. */
  1390. ret = i915_gem_gtt_pwrite_fast(obj, args);
  1391. if (ret == -EFAULT || ret == -ENOSPC) {
  1392. if (obj->phys_handle)
  1393. ret = i915_gem_phys_pwrite(obj, args, file);
  1394. else
  1395. ret = i915_gem_shmem_pwrite(obj, args);
  1396. }
  1397. i915_gem_object_unpin_pages(obj);
  1398. err:
  1399. i915_gem_object_put(obj);
  1400. return ret;
  1401. }
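/* Bump the object in the eviction LRUs: move each idle, bound GGTT vma to the
 * tail of its vm's inactive list, and the object itself to the tail of the
 * bound/unbound object list, marking it as most recently used.
 */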
  1402. static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
  1403. {
  1404. struct drm_i915_private *i915;
  1405. struct list_head *list;
  1406. struct i915_vma *vma;
  1407. GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
  1408. for_each_ggtt_vma(vma, obj) {
  1409. if (i915_vma_is_active(vma))
  1410. continue;
  1411. if (!drm_mm_node_allocated(&vma->node))
  1412. continue;
  1413. list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
  1414. }
  1415. i915 = to_i915(obj->base.dev);
  1416. spin_lock(&i915->mm.obj_lock);
  1417. list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
  1418. list_move_tail(&obj->mm.link, list);
  1419. spin_unlock(&i915->mm.obj_lock);
  1420. }
  1421. /**
  1422. * Called when user space prepares to use an object with the CPU, either
  1423. * through the mmap ioctl's mapping or a GTT mapping.
  1424. * @dev: drm device
  1425. * @data: ioctl data blob
  1426. * @file: drm file
  1427. */
  1428. int
  1429. i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  1430. struct drm_file *file)
  1431. {
  1432. struct drm_i915_gem_set_domain *args = data;
  1433. struct drm_i915_gem_object *obj;
  1434. uint32_t read_domains = args->read_domains;
  1435. uint32_t write_domain = args->write_domain;
  1436. int err;
  1437. /* Only handle setting domains to types used by the CPU. */
  1438. if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
  1439. return -EINVAL;
  1440. /* Having something in the write domain implies it's in the read
  1441. * domain, and only that read domain. Enforce that in the request.
  1442. */
  1443. if (write_domain != 0 && read_domains != write_domain)
  1444. return -EINVAL;
  1445. obj = i915_gem_object_lookup(file, args->handle);
  1446. if (!obj)
  1447. return -ENOENT;
  1448. /* Try to flush the object off the GPU without holding the lock.
  1449. * We will repeat the flush holding the lock in the normal manner
  1450. * to catch cases where we are gazumped.
  1451. */
  1452. err = i915_gem_object_wait(obj,
  1453. I915_WAIT_INTERRUPTIBLE |
  1454. (write_domain ? I915_WAIT_ALL : 0),
  1455. MAX_SCHEDULE_TIMEOUT,
  1456. to_rps_client(file));
  1457. if (err)
  1458. goto out;
  1459. /*
  1460. * Proxy objects do not control access to the backing storage, ergo
  1461. * they cannot be used as a means to manipulate the cache domain
  1462. * tracking for that backing storage. The proxy object is always
  1463. * considered to be outside of any cache domain.
  1464. */
  1465. if (i915_gem_object_is_proxy(obj)) {
  1466. err = -ENXIO;
  1467. goto out;
  1468. }
  1469. /*
  1470. * Flush and acquire obj->pages so that we are coherent through
  1471. * direct access in memory with previous cached writes through
  1472. * shmemfs and that our cache domain tracking remains valid.
  1473. * For example, if the obj->filp was moved to swap without us
  1474. * being notified and releasing the pages, we would mistakenly
  1475. * continue to assume that the obj remained out of the CPU cached
  1476. * domain.
  1477. */
  1478. err = i915_gem_object_pin_pages(obj);
  1479. if (err)
  1480. goto out;
  1481. err = i915_mutex_lock_interruptible(dev);
  1482. if (err)
  1483. goto out_unpin;
  1484. if (read_domains & I915_GEM_DOMAIN_WC)
  1485. err = i915_gem_object_set_to_wc_domain(obj, write_domain);
  1486. else if (read_domains & I915_GEM_DOMAIN_GTT)
  1487. err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
  1488. else
  1489. err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
  1490. /* And bump the LRU for this access */
  1491. i915_gem_object_bump_inactive_ggtt(obj);
  1492. mutex_unlock(&dev->struct_mutex);
  1493. if (write_domain != 0)
  1494. intel_fb_obj_invalidate(obj,
  1495. fb_write_origin(obj, write_domain));
  1496. out_unpin:
  1497. i915_gem_object_unpin_pages(obj);
  1498. out:
  1499. i915_gem_object_put(obj);
  1500. return err;
  1501. }
  1502. /**
  1503. * Called when user space has done writes to this buffer
  1504. * @dev: drm device
  1505. * @data: ioctl data blob
  1506. * @file: drm file
  1507. */
  1508. int
  1509. i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  1510. struct drm_file *file)
  1511. {
  1512. struct drm_i915_gem_sw_finish *args = data;
  1513. struct drm_i915_gem_object *obj;
  1514. obj = i915_gem_object_lookup(file, args->handle);
  1515. if (!obj)
  1516. return -ENOENT;
  1517. /*
  1518. * Proxy objects are barred from CPU access, so there is no
  1519. * need to ban sw_finish as it is a nop.
  1520. */
  1521. /* Pinned buffers may be scanout, so flush the cache */
  1522. i915_gem_object_flush_if_display(obj);
  1523. i915_gem_object_put(obj);
  1524. return 0;
  1525. }
  1526. /**
  1527. * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  1528. * it is mapped to.
  1529. * @dev: drm device
  1530. * @data: ioctl data blob
  1531. * @file: drm file
  1532. *
  1533. * While the mapping holds a reference on the contents of the object, it doesn't
  1534. * imply a ref on the object itself.
  1535. *
  1536. * IMPORTANT:
  1537. *
* DRM driver writers who look at this function as an example for how to do GEM
  1539. * mmap support, please don't implement mmap support like here. The modern way
  1540. * to implement DRM mmap support is with an mmap offset ioctl (like
  1541. * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
  1542. * That way debug tooling like valgrind will understand what's going on, hiding
  1543. * the mmap call in a driver private ioctl will break that. The i915 driver only
  1544. * does cpu mmaps this way because we didn't know better.
  1545. */
  1546. int
  1547. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  1548. struct drm_file *file)
  1549. {
  1550. struct drm_i915_gem_mmap *args = data;
  1551. struct drm_i915_gem_object *obj;
  1552. unsigned long addr;
  1553. if (args->flags & ~(I915_MMAP_WC))
  1554. return -EINVAL;
  1555. if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
  1556. return -ENODEV;
  1557. obj = i915_gem_object_lookup(file, args->handle);
  1558. if (!obj)
  1559. return -ENOENT;
  1560. /* prime objects have no backing filp to GEM mmap
  1561. * pages from.
  1562. */
  1563. if (!obj->base.filp) {
  1564. i915_gem_object_put(obj);
  1565. return -ENXIO;
  1566. }
  1567. addr = vm_mmap(obj->base.filp, 0, args->size,
  1568. PROT_READ | PROT_WRITE, MAP_SHARED,
  1569. args->offset);
  1570. if (args->flags & I915_MMAP_WC) {
  1571. struct mm_struct *mm = current->mm;
  1572. struct vm_area_struct *vma;
  1573. if (down_write_killable(&mm->mmap_sem)) {
  1574. i915_gem_object_put(obj);
  1575. return -EINTR;
  1576. }
  1577. vma = find_vma(mm, addr);
  1578. if (vma)
  1579. vma->vm_page_prot =
  1580. pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
  1581. else
  1582. addr = -ENOMEM;
  1583. up_write(&mm->mmap_sem);
  1584. /* This may race, but that's ok, it only gets set */
  1585. WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
  1586. }
  1587. i915_gem_object_put(obj);
  1588. if (IS_ERR((void *)addr))
  1589. return addr;
  1590. args->addr_ptr = (uint64_t) addr;
  1591. return 0;
  1592. }
  1593. static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
  1594. {
  1595. return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
  1596. }
  1597. /**
  1598. * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
  1599. *
  1600. * A history of the GTT mmap interface:
  1601. *
* 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
*     be aligned and suitable for fencing, and still fit into the available
*     mappable space left by the pinned display objects. A classic problem
*     we called the page-fault-of-doom where we would ping-pong between
*     two objects that could not fit inside the GTT and so the memcpy
*     would page one object in at the expense of the other between every
*     single byte.
  1609. *
* 1 - Objects can be any size, and have any compatible fencing (X, Y, or none,
  1611. * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
  1612. * object is too large for the available space (or simply too large
  1613. * for the mappable aperture!), a view is created instead and faulted
  1614. * into userspace. (This view is aligned and sized appropriately for
  1615. * fenced access.)
  1616. *
  1617. * 2 - Recognise WC as a separate cache domain so that we can flush the
  1618. * delayed writes via GTT before performing direct access via WC.
  1619. *
  1620. * Restrictions:
  1621. *
  1622. * * snoopable objects cannot be accessed via the GTT. It can cause machine
  1623. * hangs on some architectures, corruption on others. An attempt to service
  1624. * a GTT page fault from a snoopable object will generate a SIGBUS.
  1625. *
* * the object must be able to fit into RAM (physical memory, though not
  1627. * limited to the mappable aperture).
  1628. *
  1629. *
  1630. * Caveats:
  1631. *
  1632. * * a new GTT page fault will synchronize rendering from the GPU and flush
  1633. * all data to system memory. Subsequent access will not be synchronized.
  1634. *
  1635. * * all mappings are revoked on runtime device suspend.
  1636. *
  1637. * * there are only 8, 16 or 32 fence registers to share between all users
  1638. * (older machines require fence register for display and blitter access
  1639. * as well). Contention of the fence registers will cause the previous users
  1640. * to be unmapped and any new access will generate new page faults.
  1641. *
  1642. * * running out of memory while servicing a fault may generate a SIGBUS,
  1643. * rather than the expected SIGSEGV.
  1644. */
  1645. int i915_gem_mmap_gtt_version(void)
  1646. {
  1647. return 2;
  1648. }
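/* Pick the GGTT view used to service a fault: a chunk-aligned partial view
 * around the faulting page, widened to whole tile rows for tiled objects, or
 * a normal (full-object) view when the chunk would cover the object anyway.
 */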
  1649. static inline struct i915_ggtt_view
  1650. compute_partial_view(struct drm_i915_gem_object *obj,
  1651. pgoff_t page_offset,
  1652. unsigned int chunk)
  1653. {
  1654. struct i915_ggtt_view view;
  1655. if (i915_gem_object_is_tiled(obj))
  1656. chunk = roundup(chunk, tile_row_pages(obj));
  1657. view.type = I915_GGTT_VIEW_PARTIAL;
  1658. view.partial.offset = rounddown(page_offset, chunk);
  1659. view.partial.size =
  1660. min_t(unsigned int, chunk,
  1661. (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
  1662. /* If the partial covers the entire object, just create a normal VMA. */
  1663. if (chunk >= obj->base.size >> PAGE_SHIFT)
  1664. view.type = I915_GGTT_VIEW_NORMAL;
  1665. return view;
  1666. }
  1667. /**
  1668. * i915_gem_fault - fault a page into the GTT
  1669. * @vmf: fault info
  1670. *
* The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
  1672. * from userspace. The fault handler takes care of binding the object to
  1673. * the GTT (if needed), allocating and programming a fence register (again,
  1674. * only if needed based on whether the old reg is still valid or the object
  1675. * is tiled) and inserting a new PTE into the faulting process.
  1676. *
  1677. * Note that the faulting process may involve evicting existing objects
  1678. * from the GTT and/or fence registers to make room. So performance may
  1679. * suffer if the GTT working set is large or there are few fence registers
  1680. * left.
  1681. *
  1682. * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  1683. * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  1684. */
  1685. int i915_gem_fault(struct vm_fault *vmf)
  1686. {
  1687. #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
  1688. struct vm_area_struct *area = vmf->vma;
  1689. struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
  1690. struct drm_device *dev = obj->base.dev;
  1691. struct drm_i915_private *dev_priv = to_i915(dev);
  1692. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  1693. bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
  1694. struct i915_vma *vma;
  1695. pgoff_t page_offset;
  1696. unsigned int flags;
  1697. int ret;
  1698. /* We don't use vmf->pgoff since that has the fake offset */
  1699. page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
  1700. trace_i915_gem_object_fault(obj, page_offset, true, write);
  1701. /* Try to flush the object off the GPU first without holding the lock.
  1702. * Upon acquiring the lock, we will perform our sanity checks and then
  1703. * repeat the flush holding the lock in the normal manner to catch cases
  1704. * where we are gazumped.
  1705. */
  1706. ret = i915_gem_object_wait(obj,
  1707. I915_WAIT_INTERRUPTIBLE,
  1708. MAX_SCHEDULE_TIMEOUT,
  1709. NULL);
  1710. if (ret)
  1711. goto err;
  1712. ret = i915_gem_object_pin_pages(obj);
  1713. if (ret)
  1714. goto err;
  1715. intel_runtime_pm_get(dev_priv);
  1716. ret = i915_mutex_lock_interruptible(dev);
  1717. if (ret)
  1718. goto err_rpm;
  1719. /* Access to snoopable pages through the GTT is incoherent. */
  1720. if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
  1721. ret = -EFAULT;
  1722. goto err_unlock;
  1723. }
  1724. /* If the object is smaller than a couple of partial vma, it is
  1725. * not worth only creating a single partial vma - we may as well
  1726. * clear enough space for the full object.
  1727. */
  1728. flags = PIN_MAPPABLE;
  1729. if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
  1730. flags |= PIN_NONBLOCK | PIN_NONFAULT;
  1731. /* Now pin it into the GTT as needed */
  1732. vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
  1733. if (IS_ERR(vma)) {
  1734. /* Use a partial view if it is bigger than available space */
  1735. struct i915_ggtt_view view =
  1736. compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
  1737. /* Userspace is now writing through an untracked VMA, abandon
  1738. * all hope that the hardware is able to track future writes.
  1739. */
  1740. obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
  1741. vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
  1742. }
  1743. if (IS_ERR(vma)) {
  1744. ret = PTR_ERR(vma);
  1745. goto err_unlock;
  1746. }
  1747. ret = i915_gem_object_set_to_gtt_domain(obj, write);
  1748. if (ret)
  1749. goto err_unpin;
  1750. ret = i915_vma_pin_fence(vma);
  1751. if (ret)
  1752. goto err_unpin;
  1753. /* Finally, remap it using the new GTT offset */
  1754. ret = remap_io_mapping(area,
  1755. area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
  1756. (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
  1757. min_t(u64, vma->size, area->vm_end - area->vm_start),
  1758. &ggtt->iomap);
  1759. if (ret)
  1760. goto err_fence;
  1761. /* Mark as being mmapped into userspace for later revocation */
  1762. assert_rpm_wakelock_held(dev_priv);
  1763. if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
  1764. list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
  1765. GEM_BUG_ON(!obj->userfault_count);
  1766. i915_vma_set_ggtt_write(vma);
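/* Success: deliberately fall through the unwind labels below to drop the
 * fence and vma pins taken for the fault; ret == 0 is then translated to
 * VM_FAULT_NOPAGE in the switch at the end.
 */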
  1767. err_fence:
  1768. i915_vma_unpin_fence(vma);
  1769. err_unpin:
  1770. __i915_vma_unpin(vma);
  1771. err_unlock:
  1772. mutex_unlock(&dev->struct_mutex);
  1773. err_rpm:
  1774. intel_runtime_pm_put(dev_priv);
  1775. i915_gem_object_unpin_pages(obj);
  1776. err:
  1777. switch (ret) {
  1778. case -EIO:
  1779. /*
  1780. * We eat errors when the gpu is terminally wedged to avoid
  1781. * userspace unduly crashing (gl has no provisions for mmaps to
  1782. * fail). But any other -EIO isn't ours (e.g. swap in failure)
  1783. * and so needs to be reported.
  1784. */
  1785. if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
  1786. ret = VM_FAULT_SIGBUS;
  1787. break;
  1788. }
  1789. case -EAGAIN:
  1790. /*
  1791. * EAGAIN means the gpu is hung and we'll wait for the error
  1792. * handler to reset everything when re-faulting in
  1793. * i915_mutex_lock_interruptible.
  1794. */
  1795. case 0:
  1796. case -ERESTARTSYS:
  1797. case -EINTR:
  1798. case -EBUSY:
  1799. /*
  1800. * EBUSY is ok: this just means that another thread
  1801. * already did the job.
  1802. */
  1803. ret = VM_FAULT_NOPAGE;
  1804. break;
  1805. case -ENOMEM:
  1806. ret = VM_FAULT_OOM;
  1807. break;
  1808. case -ENOSPC:
  1809. case -EFAULT:
  1810. ret = VM_FAULT_SIGBUS;
  1811. break;
  1812. default:
  1813. WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
  1814. ret = VM_FAULT_SIGBUS;
  1815. break;
  1816. }
  1817. return ret;
  1818. }
  1819. static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
  1820. {
  1821. struct i915_vma *vma;
  1822. GEM_BUG_ON(!obj->userfault_count);
  1823. obj->userfault_count = 0;
  1824. list_del(&obj->userfault_link);
  1825. drm_vma_node_unmap(&obj->base.vma_node,
  1826. obj->base.dev->anon_inode->i_mapping);
  1827. for_each_ggtt_vma(vma, obj)
  1828. i915_vma_unset_userfault(vma);
  1829. }
  1830. /**
  1831. * i915_gem_release_mmap - remove physical page mappings
  1832. * @obj: obj in question
  1833. *
  1834. * Preserve the reservation of the mmapping with the DRM core code, but
  1835. * relinquish ownership of the pages back to the system.
  1836. *
  1837. * It is vital that we remove the page mapping if we have mapped a tiled
  1838. * object through the GTT and then lose the fence register due to
  1839. * resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
  1841. * mapping will then trigger a page fault on the next user access, allowing
  1842. * fixup by i915_gem_fault().
  1843. */
  1844. void
  1845. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1846. {
  1847. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  1848. /* Serialisation between user GTT access and our code depends upon
  1849. * revoking the CPU's PTE whilst the mutex is held. The next user
  1850. * pagefault then has to wait until we release the mutex.
  1851. *
  1852. * Note that RPM complicates somewhat by adding an additional
  1853. * requirement that operations to the GGTT be made holding the RPM
  1854. * wakeref.
  1855. */
  1856. lockdep_assert_held(&i915->drm.struct_mutex);
  1857. intel_runtime_pm_get(i915);
  1858. if (!obj->userfault_count)
  1859. goto out;
  1860. __i915_gem_object_release_mmap(obj);
  1861. /* Ensure that the CPU's PTE are revoked and there are not outstanding
  1862. * memory transactions from userspace before we return. The TLB
  1863. * flushing implied above by changing the PTE above *should* be
  1864. * sufficient, an extra barrier here just provides us with a bit
  1865. * of paranoid documentation about our requirement to serialise
  1866. * memory writes before touching registers / GSM.
  1867. */
  1868. wmb();
  1869. out:
  1870. intel_runtime_pm_put(i915);
  1871. }
  1872. void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
  1873. {
  1874. struct drm_i915_gem_object *obj, *on;
  1875. int i;
  1876. /*
  1877. * Only called during RPM suspend. All users of the userfault_list
  1878. * must be holding an RPM wakeref to ensure that this can not
  1879. * run concurrently with themselves (and use the struct_mutex for
  1880. * protection between themselves).
  1881. */
  1882. list_for_each_entry_safe(obj, on,
  1883. &dev_priv->mm.userfault_list, userfault_link)
  1884. __i915_gem_object_release_mmap(obj);
  1885. /* The fence will be lost when the device powers down. If any were
  1886. * in use by hardware (i.e. they are pinned), we should not be powering
  1887. * down! All other fences will be reacquired by the user upon waking.
  1888. */
  1889. for (i = 0; i < dev_priv->num_fence_regs; i++) {
  1890. struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  1891. /* Ideally we want to assert that the fence register is not
  1892. * live at this point (i.e. that no piece of code will be
  1893. * trying to write through fence + GTT, as that both violates
  1894. * our tracking of activity and associated locking/barriers,
  1895. * but also is illegal given that the hw is powered down).
  1896. *
  1897. * Previously we used reg->pin_count as a "liveness" indicator.
  1898. * That is not sufficient, and we need a more fine-grained
  1899. * tool if we want to have a sanity check here.
  1900. */
  1901. if (!reg->vma)
  1902. continue;
  1903. GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
  1904. reg->dirty = true;
  1905. }
  1906. }
  1907. static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
  1908. {
  1909. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  1910. int err;
  1911. err = drm_gem_create_mmap_offset(&obj->base);
  1912. if (likely(!err))
  1913. return 0;
  1914. /* Attempt to reap some mmap space from dead objects */
  1915. do {
  1916. err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
  1917. if (err)
  1918. break;
  1919. i915_gem_drain_freed_objects(dev_priv);
  1920. err = drm_gem_create_mmap_offset(&obj->base);
  1921. if (!err)
  1922. break;
  1923. } while (flush_delayed_work(&dev_priv->gt.retire_work));
  1924. return err;
  1925. }
  1926. static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
  1927. {
  1928. drm_gem_free_mmap_offset(&obj->base);
  1929. }
  1930. int
  1931. i915_gem_mmap_gtt(struct drm_file *file,
  1932. struct drm_device *dev,
  1933. uint32_t handle,
  1934. uint64_t *offset)
  1935. {
  1936. struct drm_i915_gem_object *obj;
  1937. int ret;
  1938. obj = i915_gem_object_lookup(file, handle);
  1939. if (!obj)
  1940. return -ENOENT;
  1941. ret = i915_gem_object_create_mmap_offset(obj);
  1942. if (ret == 0)
  1943. *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
  1944. i915_gem_object_put(obj);
  1945. return ret;
  1946. }
  1947. /**
  1948. * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1949. * @dev: DRM device
  1950. * @data: GTT mapping ioctl data
  1951. * @file: GEM object info
  1952. *
  1953. * Simply returns the fake offset to userspace so it can mmap it.
  1954. * The mmap call will end up in drm_gem_mmap(), which will set things
  1955. * up so we can get faults in the handler above.
  1956. *
  1957. * The fault handler will take care of binding the object into the GTT
  1958. * (since it may have been evicted to make room for something), allocating
  1959. * a fence register, and mapping the appropriate aperture address into
  1960. * userspace.
  1961. */
  1962. int
  1963. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1964. struct drm_file *file)
  1965. {
  1966. struct drm_i915_gem_mmap_gtt *args = data;
  1967. return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  1968. }
  1969. /* Immediately discard the backing storage */
  1970. static void
  1971. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1972. {
  1973. i915_gem_object_free_mmap_offset(obj);
  1974. if (obj->base.filp == NULL)
  1975. return;
  1976. /* Our goal here is to return as much of the memory as
* possible back to the system as we are called from OOM.
  1978. * To do this we must instruct the shmfs to drop all of its
  1979. * backing pages, *now*.
  1980. */
  1981. shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
  1982. obj->mm.madv = __I915_MADV_PURGED;
  1983. obj->mm.pages = ERR_PTR(-EFAULT);
  1984. }
  1985. /* Try to discard unwanted pages */
  1986. void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
  1987. {
  1988. struct address_space *mapping;
  1989. lockdep_assert_held(&obj->mm.lock);
  1990. GEM_BUG_ON(i915_gem_object_has_pages(obj));
  1991. switch (obj->mm.madv) {
  1992. case I915_MADV_DONTNEED:
  1993. i915_gem_object_truncate(obj);
  1994. case __I915_MADV_PURGED:
  1995. return;
  1996. }
  1997. if (obj->base.filp == NULL)
  1998. return;
mapping = obj->base.filp->f_mapping;
  2000. invalidate_mapping_pages(mapping, 0, (loff_t)-1);
  2001. }
  2002. static void
  2003. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
  2004. struct sg_table *pages)
  2005. {
  2006. struct sgt_iter sgt_iter;
  2007. struct page *page;
  2008. __i915_gem_object_release_shmem(obj, pages, true);
  2009. i915_gem_gtt_finish_pages(obj, pages);
  2010. if (i915_gem_object_needs_bit17_swizzle(obj))
  2011. i915_gem_object_save_bit_17_swizzle(obj, pages);
  2012. for_each_sgt_page(page, sgt_iter, pages) {
  2013. if (obj->mm.dirty)
  2014. set_page_dirty(page);
  2015. if (obj->mm.madv == I915_MADV_WILLNEED)
  2016. mark_page_accessed(page);
  2017. put_page(page);
  2018. }
  2019. obj->mm.dirty = false;
  2020. sg_free_table(pages);
  2021. kfree(pages);
  2022. }
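/* Invalidate the cached lookups used by i915_gem_object_get_page(): drop
 * every entry in the object's radix tree so stale sg positions are not
 * reused after the backing pages have been released.
 */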
  2023. static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
  2024. {
  2025. struct radix_tree_iter iter;
  2026. void __rcu **slot;
  2027. rcu_read_lock();
  2028. radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
  2029. radix_tree_delete(&obj->mm.get_page.radix, iter.index);
  2030. rcu_read_unlock();
  2031. }
  2032. void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
  2033. enum i915_mm_subclass subclass)
  2034. {
  2035. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  2036. struct sg_table *pages;
  2037. if (i915_gem_object_has_pinned_pages(obj))
  2038. return;
  2039. GEM_BUG_ON(obj->bind_count);
  2040. if (!i915_gem_object_has_pages(obj))
  2041. return;
  2042. /* May be called by shrinker from within get_pages() (on another bo) */
  2043. mutex_lock_nested(&obj->mm.lock, subclass);
  2044. if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
  2045. goto unlock;
  2046. /* ->put_pages might need to allocate memory for the bit17 swizzle
  2047. * array, hence protect them from being reaped by removing them from gtt
  2048. * lists early. */
  2049. pages = fetch_and_zero(&obj->mm.pages);
  2050. GEM_BUG_ON(!pages);
  2051. spin_lock(&i915->mm.obj_lock);
  2052. list_del(&obj->mm.link);
  2053. spin_unlock(&i915->mm.obj_lock);
  2054. if (obj->mm.mapping) {
  2055. void *ptr;
  2056. ptr = page_mask_bits(obj->mm.mapping);
  2057. if (is_vmalloc_addr(ptr))
  2058. vunmap(ptr);
  2059. else
  2060. kunmap(kmap_to_page(ptr));
  2061. obj->mm.mapping = NULL;
  2062. }
  2063. __i915_gem_object_reset_page_iter(obj);
  2064. if (!IS_ERR(pages))
  2065. obj->ops->put_pages(obj, pages);
  2066. obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
  2067. unlock:
  2068. mutex_unlock(&obj->mm.lock);
  2069. }
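/* After coalescing, st->nents may be far smaller than the orig_nents that
 * were allocated. Copy the used entries into a right-sized table so the
 * slack is not carried around for the lifetime of the object.
 */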
  2070. static bool i915_sg_trim(struct sg_table *orig_st)
  2071. {
  2072. struct sg_table new_st;
  2073. struct scatterlist *sg, *new_sg;
  2074. unsigned int i;
  2075. if (orig_st->nents == orig_st->orig_nents)
  2076. return false;
  2077. if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
  2078. return false;
  2079. new_sg = new_st.sgl;
  2080. for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
  2081. sg_set_page(new_sg, sg_page(sg), sg->length, 0);
  2082. /* called before being DMA mapped, no need to copy sg->dma_* */
  2083. new_sg = sg_next(new_sg);
  2084. }
  2085. GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
  2086. sg_free_table(orig_st);
  2087. *orig_st = new_st;
  2088. return true;
  2089. }
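/* Acquire backing storage for a shmem object: allocate (or swap in) each
 * page from the filp's mapping, coalesce physically contiguous pages into
 * large sg entries (bounded by max_segment), DMA-map the table and publish
 * it via __i915_gem_object_set_pages().
 */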
  2090. static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
  2091. {
  2092. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  2093. const unsigned long page_count = obj->base.size / PAGE_SIZE;
  2094. unsigned long i;
  2095. struct address_space *mapping;
  2096. struct sg_table *st;
  2097. struct scatterlist *sg;
  2098. struct sgt_iter sgt_iter;
  2099. struct page *page;
  2100. unsigned long last_pfn = 0; /* suppress gcc warning */
  2101. unsigned int max_segment = i915_sg_segment_size();
  2102. unsigned int sg_page_sizes;
  2103. gfp_t noreclaim;
  2104. int ret;
  2105. /* Assert that the object is not currently in any GPU domain. As it
  2106. * wasn't in the GTT, there shouldn't be any way it could have been in
  2107. * a GPU cache
  2108. */
  2109. GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
  2110. GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
  2111. st = kmalloc(sizeof(*st), GFP_KERNEL);
  2112. if (st == NULL)
  2113. return -ENOMEM;
  2114. rebuild_st:
  2115. if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
  2116. kfree(st);
  2117. return -ENOMEM;
  2118. }
  2119. /* Get the list of pages out of our struct file. They'll be pinned
  2120. * at this point until we release them.
  2121. *
  2122. * Fail silently without starting the shrinker
  2123. */
  2124. mapping = obj->base.filp->f_mapping;
  2125. noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
  2126. noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
  2127. sg = st->sgl;
  2128. st->nents = 0;
  2129. sg_page_sizes = 0;
  2130. for (i = 0; i < page_count; i++) {
  2131. const unsigned int shrink[] = {
  2132. I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
  2133. 0,
  2134. }, *s = shrink;
  2135. gfp_t gfp = noreclaim;
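/* Allocation ladder: first try without triggering reclaim, then reap our
 * own purgeable buffers, and finally retry with full (but non-OOM-killing)
 * reclaim via __GFP_RETRY_MAYFAIL below.
 */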
  2136. do {
  2137. page = shmem_read_mapping_page_gfp(mapping, i, gfp);
  2138. if (likely(!IS_ERR(page)))
  2139. break;
  2140. if (!*s) {
  2141. ret = PTR_ERR(page);
  2142. goto err_sg;
  2143. }
  2144. i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
  2145. cond_resched();
  2146. /* We've tried hard to allocate the memory by reaping
  2147. * our own buffer, now let the real VM do its job and
  2148. * go down in flames if truly OOM.
  2149. *
  2150. * However, since graphics tend to be disposable,
  2151. * defer the oom here by reporting the ENOMEM back
  2152. * to userspace.
  2153. */
  2154. if (!*s) {
  2155. /* reclaim and warn, but no oom */
  2156. gfp = mapping_gfp_mask(mapping);
  2157. /* Our bo are always dirty and so we require
  2158. * kswapd to reclaim our pages (direct reclaim
  2159. * does not effectively begin pageout of our
  2160. * buffers on its own). However, direct reclaim
  2161. * only waits for kswapd when under allocation
  2162. * congestion. So as a result __GFP_RECLAIM is
  2163. * unreliable and fails to actually reclaim our
  2164. * dirty pages -- unless you try over and over
  2165. * again with !__GFP_NORETRY. However, we still
  2166. * want to fail this allocation rather than
  2167. * trigger the out-of-memory killer and for
  2168. * this we want __GFP_RETRY_MAYFAIL.
  2169. */
  2170. gfp |= __GFP_RETRY_MAYFAIL;
  2171. }
  2172. } while (1);
  2173. if (!i ||
  2174. sg->length >= max_segment ||
  2175. page_to_pfn(page) != last_pfn + 1) {
  2176. if (i) {
  2177. sg_page_sizes |= sg->length;
  2178. sg = sg_next(sg);
  2179. }
  2180. st->nents++;
  2181. sg_set_page(sg, page, PAGE_SIZE, 0);
  2182. } else {
  2183. sg->length += PAGE_SIZE;
  2184. }
  2185. last_pfn = page_to_pfn(page);
  2186. /* Check that the i965g/gm workaround works. */
  2187. WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
  2188. }
  2189. if (sg) { /* loop terminated early; short sg table */
  2190. sg_page_sizes |= sg->length;
  2191. sg_mark_end(sg);
  2192. }
  2193. /* Trim unused sg entries to avoid wasting memory. */
  2194. i915_sg_trim(st);
  2195. ret = i915_gem_gtt_prepare_pages(obj, st);
  2196. if (ret) {
  2197. /* DMA remapping failed? One possible cause is that
* it could not reserve enough large entries; asking
  2199. * for PAGE_SIZE chunks instead may be helpful.
  2200. */
  2201. if (max_segment > PAGE_SIZE) {
  2202. for_each_sgt_page(page, sgt_iter, st)
  2203. put_page(page);
  2204. sg_free_table(st);
  2205. max_segment = PAGE_SIZE;
  2206. goto rebuild_st;
  2207. } else {
  2208. dev_warn(&dev_priv->drm.pdev->dev,
  2209. "Failed to DMA remap %lu pages\n",
  2210. page_count);
  2211. goto err_pages;
  2212. }
  2213. }
  2214. if (i915_gem_object_needs_bit17_swizzle(obj))
  2215. i915_gem_object_do_bit_17_swizzle(obj, st);
  2216. __i915_gem_object_set_pages(obj, st, sg_page_sizes);
  2217. return 0;
  2218. err_sg:
  2219. sg_mark_end(sg);
  2220. err_pages:
  2221. for_each_sgt_page(page, sgt_iter, st)
  2222. put_page(page);
  2223. sg_free_table(st);
  2224. kfree(st);
  2225. /* shmemfs first checks if there is enough memory to allocate the page
  2226. * and reports ENOSPC should there be insufficient, along with the usual
  2227. * ENOMEM for a genuine allocation failure.
  2228. *
  2229. * We use ENOSPC in our driver to mean that we have run out of aperture
  2230. * space and so want to translate the error from shmemfs back to our
  2231. * usual understanding of ENOMEM.
  2232. */
  2233. if (ret == -ENOSPC)
  2234. ret = -ENOMEM;
  2235. return ret;
  2236. }
  2237. void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
  2238. struct sg_table *pages,
  2239. unsigned int sg_page_sizes)
  2240. {
  2241. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  2242. unsigned long supported = INTEL_INFO(i915)->page_sizes;
  2243. int i;
  2244. lockdep_assert_held(&obj->mm.lock);
  2245. obj->mm.get_page.sg_pos = pages->sgl;
  2246. obj->mm.get_page.sg_idx = 0;
  2247. obj->mm.pages = pages;
  2248. if (i915_gem_object_is_tiled(obj) &&
  2249. i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
  2250. GEM_BUG_ON(obj->mm.quirked);
  2251. __i915_gem_object_pin_pages(obj);
  2252. obj->mm.quirked = true;
  2253. }
  2254. GEM_BUG_ON(!sg_page_sizes);
  2255. obj->mm.page_sizes.phys = sg_page_sizes;
  2256. /*
  2257. * Calculate the supported page-sizes which fit into the given
  2258. * sg_page_sizes. This will give us the page-sizes which we may be able
  2259. * to use opportunistically when later inserting into the GTT. For
  2260. * example if phys=2G, then in theory we should be able to use 1G, 2M,
  2261. * 64K or 4K pages, although in practice this will depend on a number of
  2262. * other factors.
  2263. */
  2264. obj->mm.page_sizes.sg = 0;
  2265. for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
  2266. if (obj->mm.page_sizes.phys & ~0u << i)
  2267. obj->mm.page_sizes.sg |= BIT(i);
  2268. }
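/* e.g. a phys mask of SZ_64K with 4K/64K/2M supported yields
 * page_sizes.sg == SZ_4K | SZ_64K: every supported size no larger than the
 * biggest chunk size present.
 */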
  2269. GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
  2270. spin_lock(&i915->mm.obj_lock);
  2271. list_add(&obj->mm.link, &i915->mm.unbound_list);
  2272. spin_unlock(&i915->mm.obj_lock);
  2273. }
  2274. static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  2275. {
  2276. int err;
  2277. if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
  2278. DRM_DEBUG("Attempting to obtain a purgeable object\n");
  2279. return -EFAULT;
  2280. }
  2281. err = obj->ops->get_pages(obj);
  2282. GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
  2283. return err;
  2284. }
  2285. /* Ensure that the associated pages are gathered from the backing storage
  2286. * and pinned into our object. i915_gem_object_pin_pages() may be called
  2287. * multiple times before they are released by a single call to
  2288. * i915_gem_object_unpin_pages() - once the pages are no longer referenced
  2289. * either as a result of memory pressure (reaping pages under the shrinker)
  2290. * or as the object is itself released.
  2291. */
  2292. int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  2293. {
  2294. int err;
  2295. err = mutex_lock_interruptible(&obj->mm.lock);
  2296. if (err)
  2297. return err;
  2298. if (unlikely(!i915_gem_object_has_pages(obj))) {
  2299. GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
  2300. err = ____i915_gem_object_get_pages(obj);
  2301. if (err)
  2302. goto unlock;
  2303. smp_mb__before_atomic();
  2304. }
  2305. atomic_inc(&obj->mm.pages_pin_count);
  2306. unlock:
  2307. mutex_unlock(&obj->mm.lock);
  2308. return err;
  2309. }
  2310. /* The 'mapping' part of i915_gem_object_pin_map() below */
  2311. static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
  2312. enum i915_map_type type)
  2313. {
  2314. unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
  2315. struct sg_table *sgt = obj->mm.pages;
  2316. struct sgt_iter sgt_iter;
  2317. struct page *page;
  2318. struct page *stack_pages[32];
  2319. struct page **pages = stack_pages;
  2320. unsigned long i = 0;
  2321. pgprot_t pgprot;
  2322. void *addr;
  2323. /* A single page can always be kmapped */
  2324. if (n_pages == 1 && type == I915_MAP_WB)
  2325. return kmap(sg_page(sgt->sgl));
  2326. if (n_pages > ARRAY_SIZE(stack_pages)) {
  2327. /* Too big for stack -- allocate temporary array instead */
  2328. pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
  2329. if (!pages)
  2330. return NULL;
  2331. }
  2332. for_each_sgt_page(page, sgt_iter, sgt)
  2333. pages[i++] = page;
  2334. /* Check that we have the expected number of pages */
  2335. GEM_BUG_ON(i != n_pages);
  2336. switch (type) {
  2337. default:
  2338. MISSING_CASE(type);
  2339. /* fallthrough to use PAGE_KERNEL anyway */
  2340. case I915_MAP_WB:
  2341. pgprot = PAGE_KERNEL;
  2342. break;
  2343. case I915_MAP_WC:
  2344. pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
  2345. break;
  2346. }
  2347. addr = vmap(pages, n_pages, 0, pgprot);
  2348. if (pages != stack_pages)
  2349. kvfree(pages);
  2350. return addr;
  2351. }
  2352. /* get, pin, and map the pages of the object into kernel space */
  2353. void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
  2354. enum i915_map_type type)
  2355. {
  2356. enum i915_map_type has_type;
  2357. bool pinned;
  2358. void *ptr;
  2359. int ret;
  2360. if (unlikely(!i915_gem_object_has_struct_page(obj)))
  2361. return ERR_PTR(-ENXIO);
  2362. ret = mutex_lock_interruptible(&obj->mm.lock);
  2363. if (ret)
  2364. return ERR_PTR(ret);
  2365. pinned = !(type & I915_MAP_OVERRIDE);
  2366. type &= ~I915_MAP_OVERRIDE;
  2367. if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
  2368. if (unlikely(!i915_gem_object_has_pages(obj))) {
  2369. GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
  2370. ret = ____i915_gem_object_get_pages(obj);
  2371. if (ret)
  2372. goto err_unlock;
  2373. smp_mb__before_atomic();
  2374. }
  2375. atomic_inc(&obj->mm.pages_pin_count);
  2376. pinned = false;
  2377. }
  2378. GEM_BUG_ON(!i915_gem_object_has_pages(obj));
  2379. ptr = page_unpack_bits(obj->mm.mapping, &has_type);
  2380. if (ptr && has_type != type) {
  2381. if (pinned) {
  2382. ret = -EBUSY;
  2383. goto err_unpin;
  2384. }
  2385. if (is_vmalloc_addr(ptr))
  2386. vunmap(ptr);
  2387. else
  2388. kunmap(kmap_to_page(ptr));
  2389. ptr = obj->mm.mapping = NULL;
  2390. }
  2391. if (!ptr) {
  2392. ptr = i915_gem_object_map(obj, type);
  2393. if (!ptr) {
  2394. ret = -ENOMEM;
  2395. goto err_unpin;
  2396. }
  2397. obj->mm.mapping = page_pack_bits(ptr, type);
  2398. }
  2399. out_unlock:
  2400. mutex_unlock(&obj->mm.lock);
  2401. return ptr;
  2402. err_unpin:
  2403. atomic_dec(&obj->mm.pages_pin_count);
  2404. err_unlock:
  2405. ptr = ERR_PTR(ret);
  2406. goto out_unlock;
  2407. }
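/* Write directly into the shmemfs pagecache while the object still has no
 * backing pages, so the pages are populated with the user's data rather than
 * being instantiated (swapped in or cleared) first and then overwritten.
 */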
  2408. static int
  2409. i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
  2410. const struct drm_i915_gem_pwrite *arg)
  2411. {
  2412. struct address_space *mapping = obj->base.filp->f_mapping;
  2413. char __user *user_data = u64_to_user_ptr(arg->data_ptr);
  2414. u64 remain, offset;
  2415. unsigned int pg;
  2416. /* Before we instantiate/pin the backing store for our use, we
  2417. * can prepopulate the shmemfs filp efficiently using a write into
  2418. * the pagecache. We avoid the penalty of instantiating all the
  2419. * pages, important if the user is just writing to a few and never
  2420. * uses the object on the GPU, and using a direct write into shmemfs
  2421. * allows it to avoid the cost of retrieving a page (either swapin
  2422. * or clearing-before-use) before it is overwritten.
  2423. */
  2424. if (i915_gem_object_has_pages(obj))
  2425. return -ENODEV;
  2426. if (obj->mm.madv != I915_MADV_WILLNEED)
  2427. return -EFAULT;
  2428. /* Before the pages are instantiated the object is treated as being
  2429. * in the CPU domain. The pages will be clflushed as required before
  2430. * use, and we can freely write into the pages directly. If userspace
2431. races pwrite with any other operation, corruption will ensue -
  2432. * that is userspace's prerogative!
  2433. */
  2434. remain = arg->size;
  2435. offset = arg->offset;
  2436. pg = offset_in_page(offset);
  2437. do {
  2438. unsigned int len, unwritten;
  2439. struct page *page;
  2440. void *data, *vaddr;
  2441. int err;
  2442. len = PAGE_SIZE - pg;
  2443. if (len > remain)
  2444. len = remain;
  2445. err = pagecache_write_begin(obj->base.filp, mapping,
  2446. offset, len, 0,
  2447. &page, &data);
  2448. if (err < 0)
  2449. return err;
  2450. vaddr = kmap(page);
  2451. unwritten = copy_from_user(vaddr + pg, user_data, len);
  2452. kunmap(page);
  2453. err = pagecache_write_end(obj->base.filp, mapping,
  2454. offset, len, len - unwritten,
  2455. page, data);
  2456. if (err < 0)
  2457. return err;
  2458. if (unwritten)
  2459. return -EFAULT;
  2460. remain -= len;
  2461. user_data += len;
  2462. offset += len;
  2463. pg = 0;
  2464. } while (remain);
  2465. return 0;
  2466. }
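/*
 * Worked example of the chunking loop above (illustrative numbers only,
 * assuming PAGE_SIZE == 4096): a pwrite of size 10000 at offset 100 is
 * split into page-aligned chunks of
 *
 *	3996 bytes (4096 - 100, finishing the first page),
 *	4096 bytes (one full page),
 *	1908 bytes (the remainder),
 *
 * with pg reset to 0 after the first iteration so every later chunk
 * starts at a page boundary.
 */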
  2467. static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
  2468. const struct i915_gem_context *ctx)
  2469. {
  2470. unsigned int score;
  2471. unsigned long prev_hang;
  2472. if (i915_gem_context_is_banned(ctx))
  2473. score = I915_CLIENT_SCORE_CONTEXT_BAN;
  2474. else
  2475. score = 0;
  2476. prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
  2477. if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
  2478. score += I915_CLIENT_SCORE_HANG_FAST;
  2479. if (score) {
  2480. atomic_add(score, &file_priv->ban_score);
  2481. DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
  2482. ctx->name, score,
  2483. atomic_read(&file_priv->ban_score));
  2484. }
  2485. }
  2486. static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
  2487. {
  2488. unsigned int score;
  2489. bool banned, bannable;
  2490. atomic_inc(&ctx->guilty_count);
  2491. bannable = i915_gem_context_is_bannable(ctx);
  2492. score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
  2493. banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
  2494. DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
  2495. ctx->name, atomic_read(&ctx->guilty_count),
  2496. score, yesno(banned && bannable));
  2497. /* Cool contexts don't accumulate client ban score */
  2498. if (!bannable)
  2499. return;
  2500. if (banned)
  2501. i915_gem_context_set_banned(ctx);
  2502. if (!IS_ERR_OR_NULL(ctx->file_priv))
  2503. i915_gem_client_mark_guilty(ctx->file_priv, ctx);
  2504. }
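/*
 * For illustration only, the banning arithmetic above reduces to the
 * following (the actual constant values live in i915_gem_context.h and
 * are not restated here): each guilty hang adds CONTEXT_SCORE_GUILTY to
 * ctx->ban_score, so a bannable context is banned after roughly
 *
 *	CONTEXT_SCORE_BAN_THRESHOLD / CONTEXT_SCORE_GUILTY
 *
 * guilty hangs, and each such ban in turn feeds
 * I915_CLIENT_SCORE_CONTEXT_BAN into the owning client's ban_score via
 * i915_gem_client_mark_guilty().
 */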
  2505. static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
  2506. {
  2507. atomic_inc(&ctx->active_count);
  2508. }
  2509. struct i915_request *
  2510. i915_gem_find_active_request(struct intel_engine_cs *engine)
  2511. {
  2512. struct i915_request *request, *active = NULL;
  2513. unsigned long flags;
  2514. /*
  2515. * We are called by the error capture, reset and to dump engine
  2516. * state at random points in time. In particular, note that neither is
  2517. * crucially ordered with an interrupt. After a hang, the GPU is dead
  2518. * and we assume that no more writes can happen (we waited long enough
  2519. * for all writes that were in transaction to be flushed) - adding an
  2520. * extra delay for a recent interrupt is pointless. Hence, we do
  2521. * not need an engine->irq_seqno_barrier() before the seqno reads.
  2522. * At all other times, we must assume the GPU is still running, but
  2523. * we only care about the snapshot of this moment.
  2524. */
  2525. spin_lock_irqsave(&engine->timeline.lock, flags);
  2526. list_for_each_entry(request, &engine->timeline.requests, link) {
  2527. if (__i915_request_completed(request, request->global_seqno))
  2528. continue;
  2529. active = request;
  2530. break;
  2531. }
  2532. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2533. return active;
  2534. }
  2535. /*
  2536. * Ensure irq handler finishes, and not run again.
  2537. * Also return the active request so that we only search for it once.
  2538. */
  2539. struct i915_request *
  2540. i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
  2541. {
  2542. struct i915_request *request = NULL;
  2543. /*
  2544. * During the reset sequence, we must prevent the engine from
  2545. * entering RC6. As the context state is undefined until we restart
  2546. * the engine, if it does enter RC6 during the reset, the state
  2547. * written to the powercontext is undefined and so we may lose
  2548. * GPU state upon resume, i.e. fail to restart after a reset.
  2549. */
  2550. intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
  2551. /*
  2552. * Prevent the signaler thread from updating the request
  2553. * state (by calling dma_fence_signal) as we are processing
  2554. * the reset. The write from the GPU of the seqno is
  2555. * asynchronous and the signaler thread may see a different
  2556. * value to us and declare the request complete, even though
  2557. * the reset routine have picked that request as the active
  2558. * (incomplete) request. This conflict is not handled
  2559. * gracefully!
  2560. */
  2561. kthread_park(engine->breadcrumbs.signaler);
  2562. /*
  2563. * Prevent request submission to the hardware until we have
  2564. * completed the reset in i915_gem_reset_finish(). If a request
  2565. * is completed by one engine, it may then queue a request
  2566. * to a second via its execlists->tasklet *just* as we are
  2567. * calling engine->init_hw() and also writing the ELSP.
  2568. * Turning off the execlists->tasklet until the reset is over
  2569. * prevents the race.
  2570. *
  2571. * Note that this needs to be a single atomic operation on the
  2572. * tasklet (flush existing tasks, prevent new tasks) to prevent
  2573. * a race between reset and set-wedged. It is not, so we do the best
2574. we can at the moment and make sure we don't lock the machine up in the more
  2575. * common case of recursively being called from set-wedged from inside
  2576. * i915_reset.
  2577. */
  2578. if (!atomic_read(&engine->execlists.tasklet.count))
  2579. tasklet_kill(&engine->execlists.tasklet);
  2580. tasklet_disable(&engine->execlists.tasklet);
  2581. /*
2582. We're using a worker to queue preemption requests from the tasklet in
  2583. * GuC submission mode.
  2584. * Even though tasklet was disabled, we may still have a worker queued.
  2585. * Let's make sure that all workers scheduled before disabling the
  2586. * tasklet are completed before continuing with the reset.
  2587. */
  2588. if (engine->i915->guc.preempt_wq)
  2589. flush_workqueue(engine->i915->guc.preempt_wq);
  2590. if (engine->irq_seqno_barrier)
  2591. engine->irq_seqno_barrier(engine);
  2592. request = i915_gem_find_active_request(engine);
  2593. if (request && request->fence.error == -EIO)
  2594. request = ERR_PTR(-EIO); /* Previous reset failed! */
  2595. return request;
  2596. }
  2597. int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
  2598. {
  2599. struct intel_engine_cs *engine;
  2600. struct i915_request *request;
  2601. enum intel_engine_id id;
  2602. int err = 0;
  2603. for_each_engine(engine, dev_priv, id) {
  2604. request = i915_gem_reset_prepare_engine(engine);
  2605. if (IS_ERR(request)) {
  2606. err = PTR_ERR(request);
  2607. continue;
  2608. }
  2609. engine->hangcheck.active_request = request;
  2610. }
  2611. i915_gem_revoke_fences(dev_priv);
  2612. intel_uc_sanitize(dev_priv);
  2613. return err;
  2614. }
  2615. static void skip_request(struct i915_request *request)
  2616. {
  2617. void *vaddr = request->ring->vaddr;
  2618. u32 head;
  2619. /* As this request likely depends on state from the lost
  2620. * context, clear out all the user operations leaving the
  2621. * breadcrumb at the end (so we get the fence notifications).
  2622. */
  2623. head = request->head;
  2624. if (request->postfix < head) {
  2625. memset(vaddr + head, 0, request->ring->size - head);
  2626. head = 0;
  2627. }
  2628. memset(vaddr + head, 0, request->postfix - head);
  2629. dma_fence_set_error(&request->fence, -EIO);
  2630. }
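/*
 * Illustrative sketch of the two memset() cases above (numbers made up):
 * with a 4096 byte ring, head == 3840 and postfix == 256 the request
 * wraps, so we clear [3840, 4096) first, reset head to 0 and then clear
 * [0, 256); for a non-wrapping request (postfix >= head) only the single
 * span [head, postfix) is cleared, leaving the breadcrumb that follows
 * postfix intact so the fence still signals.
 */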
  2631. static void engine_skip_context(struct i915_request *request)
  2632. {
  2633. struct intel_engine_cs *engine = request->engine;
  2634. struct i915_gem_context *hung_ctx = request->ctx;
  2635. struct i915_timeline *timeline = request->timeline;
  2636. unsigned long flags;
  2637. GEM_BUG_ON(timeline == &engine->timeline);
  2638. spin_lock_irqsave(&engine->timeline.lock, flags);
  2639. spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
  2640. list_for_each_entry_continue(request, &engine->timeline.requests, link)
  2641. if (request->ctx == hung_ctx)
  2642. skip_request(request);
  2643. list_for_each_entry(request, &timeline->requests, link)
  2644. skip_request(request);
  2645. spin_unlock(&timeline->lock);
  2646. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2647. }
  2648. /* Returns the request if it was guilty of the hang */
  2649. static struct i915_request *
  2650. i915_gem_reset_request(struct intel_engine_cs *engine,
  2651. struct i915_request *request,
  2652. bool stalled)
  2653. {
  2654. /* The guilty request will get skipped on a hung engine.
  2655. *
  2656. * Users of client default contexts do not rely on logical
  2657. * state preserved between batches so it is safe to execute
  2658. * queued requests following the hang. Non default contexts
  2659. * rely on preserved state, so skipping a batch loses the
  2660. * evolution of the state and it needs to be considered corrupted.
  2661. * Executing more queued batches on top of corrupted state is
  2662. * risky. But we take the risk by trying to advance through
  2663. * the queued requests in order to make the client behaviour
2664. more predictable around resets, by not throwing away a random
  2665. * amount of batches it has prepared for execution. Sophisticated
  2666. * clients can use gem_reset_stats_ioctl and dma fence status
  2667. * (exported via sync_file info ioctl on explicit fences) to observe
  2668. * when it loses the context state and should rebuild accordingly.
  2669. *
  2670. * The context ban, and ultimately the client ban, mechanism are safety
  2671. * valves if client submission ends up resulting in nothing more than
  2672. * subsequent hangs.
  2673. */
  2674. if (i915_request_completed(request)) {
  2675. GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
  2676. engine->name, request->global_seqno,
  2677. request->fence.context, request->fence.seqno,
  2678. intel_engine_get_seqno(engine));
  2679. stalled = false;
  2680. }
  2681. if (stalled) {
  2682. i915_gem_context_mark_guilty(request->ctx);
  2683. skip_request(request);
  2684. /* If this context is now banned, skip all pending requests. */
  2685. if (i915_gem_context_is_banned(request->ctx))
  2686. engine_skip_context(request);
  2687. } else {
  2688. /*
  2689. * Since this is not the hung engine, it may have advanced
  2690. * since the hang declaration. Double check by refinding
  2691. * the active request at the time of the reset.
  2692. */
  2693. request = i915_gem_find_active_request(engine);
  2694. if (request) {
  2695. i915_gem_context_mark_innocent(request->ctx);
  2696. dma_fence_set_error(&request->fence, -EAGAIN);
  2697. /* Rewind the engine to replay the incomplete rq */
  2698. spin_lock_irq(&engine->timeline.lock);
  2699. request = list_prev_entry(request, link);
  2700. if (&request->link == &engine->timeline.requests)
  2701. request = NULL;
  2702. spin_unlock_irq(&engine->timeline.lock);
  2703. }
  2704. }
  2705. return request;
  2706. }
  2707. void i915_gem_reset_engine(struct intel_engine_cs *engine,
  2708. struct i915_request *request,
  2709. bool stalled)
  2710. {
  2711. /*
  2712. * Make sure this write is visible before we re-enable the interrupt
  2713. * handlers on another CPU, as tasklet_enable() resolves to just
  2714. * a compiler barrier which is insufficient for our purpose here.
  2715. */
  2716. smp_store_mb(engine->irq_posted, 0);
  2717. if (request)
  2718. request = i915_gem_reset_request(engine, request, stalled);
  2719. if (request) {
  2720. DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
  2721. engine->name, request->global_seqno);
  2722. }
  2723. /* Setup the CS to resume from the breadcrumb of the hung request */
  2724. engine->reset_hw(engine, request);
  2725. }
  2726. void i915_gem_reset(struct drm_i915_private *dev_priv,
  2727. unsigned int stalled_mask)
  2728. {
  2729. struct intel_engine_cs *engine;
  2730. enum intel_engine_id id;
  2731. lockdep_assert_held(&dev_priv->drm.struct_mutex);
  2732. i915_retire_requests(dev_priv);
  2733. for_each_engine(engine, dev_priv, id) {
  2734. struct i915_gem_context *ctx;
  2735. i915_gem_reset_engine(engine,
  2736. engine->hangcheck.active_request,
  2737. stalled_mask & ENGINE_MASK(id));
  2738. ctx = fetch_and_zero(&engine->last_retired_context);
  2739. if (ctx)
  2740. intel_context_unpin(ctx, engine);
  2741. /*
2742. Ostensibly, we always want a context loaded for powersaving,
  2743. * so if the engine is idle after the reset, send a request
  2744. * to load our scratch kernel_context.
  2745. *
  2746. * More mysteriously, if we leave the engine idle after a reset,
  2747. * the next userspace batch may hang, with what appears to be
  2748. * an incoherent read by the CS (presumably stale TLB). An
  2749. * empty request appears sufficient to paper over the glitch.
  2750. */
  2751. if (intel_engine_is_idle(engine)) {
  2752. struct i915_request *rq;
  2753. rq = i915_request_alloc(engine,
  2754. dev_priv->kernel_context);
  2755. if (!IS_ERR(rq))
  2756. __i915_request_add(rq, false);
  2757. }
  2758. }
  2759. i915_gem_restore_fences(dev_priv);
  2760. }
  2761. void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
  2762. {
  2763. tasklet_enable(&engine->execlists.tasklet);
  2764. kthread_unpark(engine->breadcrumbs.signaler);
  2765. intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
  2766. }
  2767. void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
  2768. {
  2769. struct intel_engine_cs *engine;
  2770. enum intel_engine_id id;
  2771. lockdep_assert_held(&dev_priv->drm.struct_mutex);
  2772. for_each_engine(engine, dev_priv, id) {
  2773. engine->hangcheck.active_request = NULL;
  2774. i915_gem_reset_finish_engine(engine);
  2775. }
  2776. }
  2777. static void nop_submit_request(struct i915_request *request)
  2778. {
  2779. GEM_TRACE("%s fence %llx:%d -> -EIO\n",
  2780. request->engine->name,
  2781. request->fence.context, request->fence.seqno);
  2782. dma_fence_set_error(&request->fence, -EIO);
  2783. i915_request_submit(request);
  2784. }
  2785. static void nop_complete_submit_request(struct i915_request *request)
  2786. {
  2787. unsigned long flags;
  2788. GEM_TRACE("%s fence %llx:%d -> -EIO\n",
  2789. request->engine->name,
  2790. request->fence.context, request->fence.seqno);
  2791. dma_fence_set_error(&request->fence, -EIO);
  2792. spin_lock_irqsave(&request->engine->timeline.lock, flags);
  2793. __i915_request_submit(request);
  2794. intel_engine_init_global_seqno(request->engine, request->global_seqno);
  2795. spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
  2796. }
  2797. void i915_gem_set_wedged(struct drm_i915_private *i915)
  2798. {
  2799. struct intel_engine_cs *engine;
  2800. enum intel_engine_id id;
  2801. GEM_TRACE("start\n");
  2802. if (GEM_SHOW_DEBUG()) {
  2803. struct drm_printer p = drm_debug_printer(__func__);
  2804. for_each_engine(engine, i915, id)
  2805. intel_engine_dump(engine, &p, "%s\n", engine->name);
  2806. }
  2807. set_bit(I915_WEDGED, &i915->gpu_error.flags);
  2808. smp_mb__after_atomic();
  2809. /*
  2810. * First, stop submission to hw, but do not yet complete requests by
  2811. * rolling the global seqno forward (since this would complete requests
  2812. * for which we haven't set the fence error to EIO yet).
  2813. */
  2814. for_each_engine(engine, i915, id) {
  2815. i915_gem_reset_prepare_engine(engine);
  2816. engine->submit_request = nop_submit_request;
  2817. engine->schedule = NULL;
  2818. }
  2819. i915->caps.scheduler = 0;
  2820. /* Even if the GPU reset fails, it should still stop the engines */
  2821. intel_gpu_reset(i915, ALL_ENGINES);
  2822. /*
  2823. * Make sure no one is running the old callback before we proceed with
  2824. * cancelling requests and resetting the completion tracking. Otherwise
  2825. * we might submit a request to the hardware which never completes.
  2826. */
  2827. synchronize_rcu();
  2828. for_each_engine(engine, i915, id) {
  2829. /* Mark all executing requests as skipped */
  2830. engine->cancel_requests(engine);
  2831. /*
  2832. * Only once we've force-cancelled all in-flight requests can we
  2833. * start to complete all requests.
  2834. */
  2835. engine->submit_request = nop_complete_submit_request;
  2836. }
  2837. /*
  2838. * Make sure no request can slip through without getting completed by
  2839. * either this call here to intel_engine_init_global_seqno, or the one
  2840. * in nop_complete_submit_request.
  2841. */
  2842. synchronize_rcu();
  2843. for_each_engine(engine, i915, id) {
  2844. unsigned long flags;
  2845. /*
  2846. * Mark all pending requests as complete so that any concurrent
  2847. * (lockless) lookup doesn't try and wait upon the request as we
  2848. * reset it.
  2849. */
  2850. spin_lock_irqsave(&engine->timeline.lock, flags);
  2851. intel_engine_init_global_seqno(engine,
  2852. intel_engine_last_submit(engine));
  2853. spin_unlock_irqrestore(&engine->timeline.lock, flags);
  2854. i915_gem_reset_finish_engine(engine);
  2855. }
  2856. GEM_TRACE("end\n");
  2857. wake_up_all(&i915->gpu_error.reset_queue);
  2858. }
  2859. bool i915_gem_unset_wedged(struct drm_i915_private *i915)
  2860. {
  2861. struct i915_timeline *tl;
  2862. lockdep_assert_held(&i915->drm.struct_mutex);
  2863. if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
  2864. return true;
  2865. GEM_TRACE("start\n");
  2866. /*
  2867. * Before unwedging, make sure that all pending operations
  2868. * are flushed and errored out - we may have requests waiting upon
  2869. * third party fences. We marked all inflight requests as EIO, and
2870. every execbuf since has returned EIO; for consistency we want all
  2871. * the currently pending requests to also be marked as EIO, which
  2872. * is done inside our nop_submit_request - and so we must wait.
  2873. *
  2874. * No more can be submitted until we reset the wedged bit.
  2875. */
  2876. list_for_each_entry(tl, &i915->gt.timelines, link) {
  2877. struct i915_request *rq;
  2878. rq = i915_gem_active_peek(&tl->last_request,
  2879. &i915->drm.struct_mutex);
  2880. if (!rq)
  2881. continue;
  2882. /*
  2883. * We can't use our normal waiter as we want to
  2884. * avoid recursively trying to handle the current
  2885. * reset. The basic dma_fence_default_wait() installs
  2886. * a callback for dma_fence_signal(), which is
  2887. * triggered by our nop handler (indirectly, the
  2888. * callback enables the signaler thread which is
  2889. * woken by the nop_submit_request() advancing the seqno
  2890. * and when the seqno passes the fence, the signaler
  2891. * then signals the fence waking us up).
  2892. */
  2893. if (dma_fence_default_wait(&rq->fence, true,
  2894. MAX_SCHEDULE_TIMEOUT) < 0)
  2895. return false;
  2896. }
  2897. i915_retire_requests(i915);
  2898. GEM_BUG_ON(i915->gt.active_requests);
  2899. /*
  2900. * Undo nop_submit_request. We prevent all new i915 requests from
  2901. * being queued (by disallowing execbuf whilst wedged) so having
  2902. * waited for all active requests above, we know the system is idle
  2903. * and do not have to worry about a thread being inside
  2904. * engine->submit_request() as we swap over. So unlike installing
  2905. * the nop_submit_request on reset, we can do this from normal
  2906. * context and do not require stop_machine().
  2907. */
  2908. intel_engines_reset_default_submission(i915);
  2909. i915_gem_contexts_lost(i915);
  2910. GEM_TRACE("end\n");
  2911. smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
  2912. clear_bit(I915_WEDGED, &i915->gpu_error.flags);
  2913. return true;
  2914. }
  2915. static void
  2916. i915_gem_retire_work_handler(struct work_struct *work)
  2917. {
  2918. struct drm_i915_private *dev_priv =
  2919. container_of(work, typeof(*dev_priv), gt.retire_work.work);
  2920. struct drm_device *dev = &dev_priv->drm;
  2921. /* Come back later if the device is busy... */
  2922. if (mutex_trylock(&dev->struct_mutex)) {
  2923. i915_retire_requests(dev_priv);
  2924. mutex_unlock(&dev->struct_mutex);
  2925. }
  2926. /*
  2927. * Keep the retire handler running until we are finally idle.
  2928. * We do not need to do this test under locking as in the worst-case
  2929. * we queue the retire worker once too often.
  2930. */
  2931. if (READ_ONCE(dev_priv->gt.awake))
  2932. queue_delayed_work(dev_priv->wq,
  2933. &dev_priv->gt.retire_work,
  2934. round_jiffies_up_relative(HZ));
  2935. }
  2936. static void shrink_caches(struct drm_i915_private *i915)
  2937. {
  2938. /*
  2939. * kmem_cache_shrink() discards empty slabs and reorders partially
  2940. * filled slabs to prioritise allocating from the mostly full slabs,
  2941. * with the aim of reducing fragmentation.
  2942. */
  2943. kmem_cache_shrink(i915->priorities);
  2944. kmem_cache_shrink(i915->dependencies);
  2945. kmem_cache_shrink(i915->requests);
  2946. kmem_cache_shrink(i915->luts);
  2947. kmem_cache_shrink(i915->vmas);
  2948. kmem_cache_shrink(i915->objects);
  2949. }
  2950. struct sleep_rcu_work {
  2951. union {
  2952. struct rcu_head rcu;
  2953. struct work_struct work;
  2954. };
  2955. struct drm_i915_private *i915;
  2956. unsigned int epoch;
  2957. };
  2958. static inline bool
  2959. same_epoch(struct drm_i915_private *i915, unsigned int epoch)
  2960. {
  2961. /*
  2962. * There is a small chance that the epoch wrapped since we started
  2963. * sleeping. If we assume that epoch is at least a u32, then it will
2964. take at least 2^32 * 100ms for it to wrap, or about 13.6 years.
  2965. */
  2966. return epoch == READ_ONCE(i915->gt.epoch);
  2967. }
  2968. static void __sleep_work(struct work_struct *work)
  2969. {
  2970. struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
  2971. struct drm_i915_private *i915 = s->i915;
  2972. unsigned int epoch = s->epoch;
  2973. kfree(s);
  2974. if (same_epoch(i915, epoch))
  2975. shrink_caches(i915);
  2976. }
  2977. static void __sleep_rcu(struct rcu_head *rcu)
  2978. {
  2979. struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
  2980. struct drm_i915_private *i915 = s->i915;
  2981. if (same_epoch(i915, s->epoch)) {
  2982. INIT_WORK(&s->work, __sleep_work);
  2983. queue_work(i915->wq, &s->work);
  2984. } else {
  2985. kfree(s);
  2986. }
  2987. }
  2988. static inline bool
  2989. new_requests_since_last_retire(const struct drm_i915_private *i915)
  2990. {
  2991. return (READ_ONCE(i915->gt.active_requests) ||
  2992. work_pending(&i915->gt.idle_work.work));
  2993. }
  2994. static void
  2995. i915_gem_idle_work_handler(struct work_struct *work)
  2996. {
  2997. struct drm_i915_private *dev_priv =
  2998. container_of(work, typeof(*dev_priv), gt.idle_work.work);
  2999. unsigned int epoch = I915_EPOCH_INVALID;
  3000. bool rearm_hangcheck;
  3001. if (!READ_ONCE(dev_priv->gt.awake))
  3002. return;
  3003. /*
  3004. * Wait for last execlists context complete, but bail out in case a
  3005. * new request is submitted. As we don't trust the hardware, we
  3006. * continue on if the wait times out. This is necessary to allow
  3007. * the machine to suspend even if the hardware dies, and we will
  3008. * try to recover in resume (after depriving the hardware of power,
3009. it may be in a better mood).
  3010. */
  3011. __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
  3012. intel_engines_are_idle(dev_priv),
  3013. I915_IDLE_ENGINES_TIMEOUT * 1000,
  3014. 10, 500);
  3015. rearm_hangcheck =
  3016. cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
  3017. if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
  3018. /* Currently busy, come back later */
  3019. mod_delayed_work(dev_priv->wq,
  3020. &dev_priv->gt.idle_work,
  3021. msecs_to_jiffies(50));
  3022. goto out_rearm;
  3023. }
  3024. /*
  3025. * New request retired after this work handler started, extend active
  3026. * period until next instance of the work.
  3027. */
  3028. if (new_requests_since_last_retire(dev_priv))
  3029. goto out_unlock;
  3030. epoch = __i915_gem_park(dev_priv);
  3031. rearm_hangcheck = false;
  3032. out_unlock:
  3033. mutex_unlock(&dev_priv->drm.struct_mutex);
  3034. out_rearm:
  3035. if (rearm_hangcheck) {
  3036. GEM_BUG_ON(!dev_priv->gt.awake);
  3037. i915_queue_hangcheck(dev_priv);
  3038. }
  3039. /*
  3040. * When we are idle, it is an opportune time to reap our caches.
  3041. * However, we have many objects that utilise RCU and the ordered
  3042. * i915->wq that this work is executing on. To try and flush any
  3043. * pending frees now we are idle, we first wait for an RCU grace
  3044. * period, and then queue a task (that will run last on the wq) to
  3045. * shrink and re-optimize the caches.
  3046. */
  3047. if (same_epoch(dev_priv, epoch)) {
  3048. struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
  3049. if (s) {
  3050. s->i915 = dev_priv;
  3051. s->epoch = epoch;
  3052. call_rcu(&s->rcu, __sleep_rcu);
  3053. }
  3054. }
  3055. }
  3056. void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
  3057. {
  3058. struct drm_i915_private *i915 = to_i915(gem->dev);
  3059. struct drm_i915_gem_object *obj = to_intel_bo(gem);
  3060. struct drm_i915_file_private *fpriv = file->driver_priv;
  3061. struct i915_lut_handle *lut, *ln;
  3062. mutex_lock(&i915->drm.struct_mutex);
  3063. list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
  3064. struct i915_gem_context *ctx = lut->ctx;
  3065. struct i915_vma *vma;
  3066. GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
  3067. if (ctx->file_priv != fpriv)
  3068. continue;
  3069. vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
  3070. GEM_BUG_ON(vma->obj != obj);
  3071. /* We allow the process to have multiple handles to the same
  3072. * vma, in the same fd namespace, by virtue of flink/open.
  3073. */
  3074. GEM_BUG_ON(!vma->open_count);
  3075. if (!--vma->open_count && !i915_vma_is_ggtt(vma))
  3076. i915_vma_close(vma);
  3077. list_del(&lut->obj_link);
  3078. list_del(&lut->ctx_link);
  3079. kmem_cache_free(i915->luts, lut);
  3080. __i915_gem_object_release_unless_active(obj);
  3081. }
  3082. mutex_unlock(&i915->drm.struct_mutex);
  3083. }
  3084. static unsigned long to_wait_timeout(s64 timeout_ns)
  3085. {
  3086. if (timeout_ns < 0)
  3087. return MAX_SCHEDULE_TIMEOUT;
  3088. if (timeout_ns == 0)
  3089. return 0;
  3090. return nsecs_to_jiffies_timeout(timeout_ns);
  3091. }
  3092. /**
  3093. * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  3094. * @dev: drm device pointer
  3095. * @data: ioctl data blob
  3096. * @file: drm file pointer
  3097. *
  3098. * Returns 0 if successful, else an error is returned with the remaining time in
  3099. * the timeout parameter.
  3100. * -ETIME: object is still busy after timeout
  3101. * -ERESTARTSYS: signal interrupted the wait
3102. -ENOENT: object doesn't exist
  3103. * Also possible, but rare:
  3104. * -EAGAIN: incomplete, restart syscall
  3105. * -ENOMEM: damn
  3106. * -ENODEV: Internal IRQ fail
  3107. * -E?: The add request failed
  3108. *
  3109. * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
  3110. * non-zero timeout parameter the wait ioctl will wait for the given number of
  3111. * nanoseconds on an object becoming unbusy. Since the wait itself does so
  3112. * without holding struct_mutex the object may become re-busied before this
  3113. * function completes. A similar but shorter * race condition exists in the busy
  3114. * ioctl
  3115. */
  3116. int
  3117. i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  3118. {
  3119. struct drm_i915_gem_wait *args = data;
  3120. struct drm_i915_gem_object *obj;
  3121. ktime_t start;
  3122. long ret;
  3123. if (args->flags != 0)
  3124. return -EINVAL;
  3125. obj = i915_gem_object_lookup(file, args->bo_handle);
  3126. if (!obj)
  3127. return -ENOENT;
  3128. start = ktime_get();
  3129. ret = i915_gem_object_wait(obj,
  3130. I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
  3131. to_wait_timeout(args->timeout_ns),
  3132. to_rps_client(file));
  3133. if (args->timeout_ns > 0) {
  3134. args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
  3135. if (args->timeout_ns < 0)
  3136. args->timeout_ns = 0;
  3137. /*
  3138. * Apparently ktime isn't accurate enough and occasionally has a
  3139. * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
  3140. * things up to make the test happy. We allow up to 1 jiffy.
  3141. *
  3142. * This is a regression from the timespec->ktime conversion.
  3143. */
  3144. if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
  3145. args->timeout_ns = 0;
  3146. /* Asked to wait beyond the jiffie/scheduler precision? */
  3147. if (ret == -ETIME && args->timeout_ns)
  3148. ret = -EAGAIN;
  3149. }
  3150. i915_gem_object_put(obj);
  3151. return ret;
  3152. }
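/*
 * Minimal userspace sketch of the ioctl documented above, assuming the
 * libdrm drmIoctl() wrapper and a valid GEM handle (error handling
 * trimmed, for illustration only):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 500 * 1000 * 1000,	// wait up to 500ms
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		; // object idle; wait.timeout_ns holds the time remaining
 *	else if (errno == ETIME)
 *		; // still busy after the timeout
 */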
  3153. static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
  3154. {
  3155. return i915_gem_active_wait(&tl->last_request, flags);
  3156. }
  3157. static int wait_for_engines(struct drm_i915_private *i915)
  3158. {
  3159. if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
  3160. dev_err(i915->drm.dev,
  3161. "Failed to idle engines, declaring wedged!\n");
  3162. GEM_TRACE_DUMP();
  3163. i915_gem_set_wedged(i915);
  3164. return -EIO;
  3165. }
  3166. return 0;
  3167. }
  3168. int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
  3169. {
  3170. /* If the device is asleep, we have no requests outstanding */
  3171. if (!READ_ONCE(i915->gt.awake))
  3172. return 0;
  3173. if (flags & I915_WAIT_LOCKED) {
  3174. struct i915_timeline *tl;
  3175. int err;
  3176. lockdep_assert_held(&i915->drm.struct_mutex);
  3177. list_for_each_entry(tl, &i915->gt.timelines, link) {
  3178. err = wait_for_timeline(tl, flags);
  3179. if (err)
  3180. return err;
  3181. }
  3182. i915_retire_requests(i915);
  3183. return wait_for_engines(i915);
  3184. } else {
  3185. struct intel_engine_cs *engine;
  3186. enum intel_engine_id id;
  3187. int err;
  3188. for_each_engine(engine, i915, id) {
  3189. err = wait_for_timeline(&engine->timeline, flags);
  3190. if (err)
  3191. return err;
  3192. }
  3193. return 0;
  3194. }
  3195. }
  3196. static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
  3197. {
  3198. /*
  3199. * We manually flush the CPU domain so that we can override and
3200. force the flush for the display, and perform it asynchronously.
  3201. */
  3202. flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  3203. if (obj->cache_dirty)
  3204. i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
  3205. obj->write_domain = 0;
  3206. }
  3207. void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
  3208. {
  3209. if (!READ_ONCE(obj->pin_global))
  3210. return;
  3211. mutex_lock(&obj->base.dev->struct_mutex);
  3212. __i915_gem_object_flush_for_display(obj);
  3213. mutex_unlock(&obj->base.dev->struct_mutex);
  3214. }
  3215. /**
  3216. * Moves a single object to the WC read, and possibly write domain.
  3217. * @obj: object to act on
  3218. * @write: ask for write access or read only
  3219. *
  3220. * This function returns when the move is complete, including waiting on
  3221. * flushes to occur.
  3222. */
  3223. int
  3224. i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
  3225. {
  3226. int ret;
  3227. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3228. ret = i915_gem_object_wait(obj,
  3229. I915_WAIT_INTERRUPTIBLE |
  3230. I915_WAIT_LOCKED |
  3231. (write ? I915_WAIT_ALL : 0),
  3232. MAX_SCHEDULE_TIMEOUT,
  3233. NULL);
  3234. if (ret)
  3235. return ret;
  3236. if (obj->write_domain == I915_GEM_DOMAIN_WC)
  3237. return 0;
  3238. /* Flush and acquire obj->pages so that we are coherent through
  3239. * direct access in memory with previous cached writes through
  3240. * shmemfs and that our cache domain tracking remains valid.
  3241. * For example, if the obj->filp was moved to swap without us
  3242. * being notified and releasing the pages, we would mistakenly
  3243. * continue to assume that the obj remained out of the CPU cached
  3244. * domain.
  3245. */
  3246. ret = i915_gem_object_pin_pages(obj);
  3247. if (ret)
  3248. return ret;
  3249. flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
  3250. /* Serialise direct access to this object with the barriers for
  3251. * coherent writes from the GPU, by effectively invalidating the
  3252. * WC domain upon first access.
  3253. */
  3254. if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
  3255. mb();
  3256. /* It should now be out of any other write domains, and we can update
  3257. * the domain values for our changes.
  3258. */
  3259. GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
  3260. obj->read_domains |= I915_GEM_DOMAIN_WC;
  3261. if (write) {
  3262. obj->read_domains = I915_GEM_DOMAIN_WC;
  3263. obj->write_domain = I915_GEM_DOMAIN_WC;
  3264. obj->mm.dirty = true;
  3265. }
  3266. i915_gem_object_unpin_pages(obj);
  3267. return 0;
  3268. }
  3269. /**
  3270. * Moves a single object to the GTT read, and possibly write domain.
  3271. * @obj: object to act on
  3272. * @write: ask for write access or read only
  3273. *
  3274. * This function returns when the move is complete, including waiting on
  3275. * flushes to occur.
  3276. */
  3277. int
  3278. i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  3279. {
  3280. int ret;
  3281. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3282. ret = i915_gem_object_wait(obj,
  3283. I915_WAIT_INTERRUPTIBLE |
  3284. I915_WAIT_LOCKED |
  3285. (write ? I915_WAIT_ALL : 0),
  3286. MAX_SCHEDULE_TIMEOUT,
  3287. NULL);
  3288. if (ret)
  3289. return ret;
  3290. if (obj->write_domain == I915_GEM_DOMAIN_GTT)
  3291. return 0;
  3292. /* Flush and acquire obj->pages so that we are coherent through
  3293. * direct access in memory with previous cached writes through
  3294. * shmemfs and that our cache domain tracking remains valid.
  3295. * For example, if the obj->filp was moved to swap without us
  3296. * being notified and releasing the pages, we would mistakenly
  3297. * continue to assume that the obj remained out of the CPU cached
  3298. * domain.
  3299. */
  3300. ret = i915_gem_object_pin_pages(obj);
  3301. if (ret)
  3302. return ret;
  3303. flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
  3304. /* Serialise direct access to this object with the barriers for
  3305. * coherent writes from the GPU, by effectively invalidating the
  3306. * GTT domain upon first access.
  3307. */
  3308. if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
  3309. mb();
  3310. /* It should now be out of any other write domains, and we can update
  3311. * the domain values for our changes.
  3312. */
  3313. GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  3314. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  3315. if (write) {
  3316. obj->read_domains = I915_GEM_DOMAIN_GTT;
  3317. obj->write_domain = I915_GEM_DOMAIN_GTT;
  3318. obj->mm.dirty = true;
  3319. }
  3320. i915_gem_object_unpin_pages(obj);
  3321. return 0;
  3322. }
  3323. /**
  3324. * Changes the cache-level of an object across all VMA.
  3325. * @obj: object to act on
  3326. * @cache_level: new cache level to set for the object
  3327. *
  3328. * After this function returns, the object will be in the new cache-level
  3329. * across all GTT and the contents of the backing storage will be coherent,
  3330. * with respect to the new cache-level. In order to keep the backing storage
  3331. * coherent for all users, we only allow a single cache level to be set
  3332. * globally on the object and prevent it from being changed whilst the
3333. hardware is reading from the object. That is, if the object is currently
  3334. * on the scanout it will be set to uncached (or equivalent display
  3335. * cache coherency) and all non-MOCS GPU access will also be uncached so
  3336. * that all direct access to the scanout remains coherent.
  3337. */
  3338. int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
  3339. enum i915_cache_level cache_level)
  3340. {
  3341. struct i915_vma *vma;
  3342. int ret;
  3343. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3344. if (obj->cache_level == cache_level)
  3345. return 0;
  3346. /* Inspect the list of currently bound VMA and unbind any that would
  3347. * be invalid given the new cache-level. This is principally to
  3348. * catch the issue of the CS prefetch crossing page boundaries and
  3349. * reading an invalid PTE on older architectures.
  3350. */
  3351. restart:
  3352. list_for_each_entry(vma, &obj->vma_list, obj_link) {
  3353. if (!drm_mm_node_allocated(&vma->node))
  3354. continue;
  3355. if (i915_vma_is_pinned(vma)) {
  3356. DRM_DEBUG("can not change the cache level of pinned objects\n");
  3357. return -EBUSY;
  3358. }
  3359. if (!i915_vma_is_closed(vma) &&
  3360. i915_gem_valid_gtt_space(vma, cache_level))
  3361. continue;
  3362. ret = i915_vma_unbind(vma);
  3363. if (ret)
  3364. return ret;
  3365. /* As unbinding may affect other elements in the
  3366. * obj->vma_list (due to side-effects from retiring
  3367. * an active vma), play safe and restart the iterator.
  3368. */
  3369. goto restart;
  3370. }
  3371. /* We can reuse the existing drm_mm nodes but need to change the
  3372. * cache-level on the PTE. We could simply unbind them all and
  3373. * rebind with the correct cache-level on next use. However since
3374. we already have a valid slot, dma mapping, pages etc, we may as well
  3375. * rewrite the PTE in the belief that doing so tramples upon less
  3376. * state and so involves less work.
  3377. */
  3378. if (obj->bind_count) {
  3379. /* Before we change the PTE, the GPU must not be accessing it.
  3380. * If we wait upon the object, we know that all the bound
  3381. * VMA are no longer active.
  3382. */
  3383. ret = i915_gem_object_wait(obj,
  3384. I915_WAIT_INTERRUPTIBLE |
  3385. I915_WAIT_LOCKED |
  3386. I915_WAIT_ALL,
  3387. MAX_SCHEDULE_TIMEOUT,
  3388. NULL);
  3389. if (ret)
  3390. return ret;
  3391. if (!HAS_LLC(to_i915(obj->base.dev)) &&
  3392. cache_level != I915_CACHE_NONE) {
  3393. /* Access to snoopable pages through the GTT is
  3394. * incoherent and on some machines causes a hard
3395. lockup. Relinquish the CPU mmapping to force
  3396. * userspace to refault in the pages and we can
  3397. * then double check if the GTT mapping is still
  3398. * valid for that pointer access.
  3399. */
  3400. i915_gem_release_mmap(obj);
  3401. /* As we no longer need a fence for GTT access,
  3402. * we can relinquish it now (and so prevent having
  3403. * to steal a fence from someone else on the next
  3404. * fence request). Note GPU activity would have
  3405. * dropped the fence as all snoopable access is
  3406. * supposed to be linear.
  3407. */
  3408. for_each_ggtt_vma(vma, obj) {
  3409. ret = i915_vma_put_fence(vma);
  3410. if (ret)
  3411. return ret;
  3412. }
  3413. } else {
  3414. /* We either have incoherent backing store and
  3415. * so no GTT access or the architecture is fully
  3416. * coherent. In such cases, existing GTT mmaps
  3417. * ignore the cache bit in the PTE and we can
  3418. * rewrite it without confusing the GPU or having
  3419. * to force userspace to fault back in its mmaps.
  3420. */
  3421. }
  3422. list_for_each_entry(vma, &obj->vma_list, obj_link) {
  3423. if (!drm_mm_node_allocated(&vma->node))
  3424. continue;
  3425. ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
  3426. if (ret)
  3427. return ret;
  3428. }
  3429. }
  3430. list_for_each_entry(vma, &obj->vma_list, obj_link)
  3431. vma->node.color = cache_level;
  3432. i915_gem_object_set_cache_coherency(obj, cache_level);
  3433. obj->cache_dirty = true; /* Always invalidate stale cachelines */
  3434. return 0;
  3435. }
  3436. int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
  3437. struct drm_file *file)
  3438. {
  3439. struct drm_i915_gem_caching *args = data;
  3440. struct drm_i915_gem_object *obj;
  3441. int err = 0;
  3442. rcu_read_lock();
  3443. obj = i915_gem_object_lookup_rcu(file, args->handle);
  3444. if (!obj) {
  3445. err = -ENOENT;
  3446. goto out;
  3447. }
  3448. switch (obj->cache_level) {
  3449. case I915_CACHE_LLC:
  3450. case I915_CACHE_L3_LLC:
  3451. args->caching = I915_CACHING_CACHED;
  3452. break;
  3453. case I915_CACHE_WT:
  3454. args->caching = I915_CACHING_DISPLAY;
  3455. break;
  3456. default:
  3457. args->caching = I915_CACHING_NONE;
  3458. break;
  3459. }
  3460. out:
  3461. rcu_read_unlock();
  3462. return err;
  3463. }
  3464. int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
  3465. struct drm_file *file)
  3466. {
  3467. struct drm_i915_private *i915 = to_i915(dev);
  3468. struct drm_i915_gem_caching *args = data;
  3469. struct drm_i915_gem_object *obj;
  3470. enum i915_cache_level level;
  3471. int ret = 0;
  3472. switch (args->caching) {
  3473. case I915_CACHING_NONE:
  3474. level = I915_CACHE_NONE;
  3475. break;
  3476. case I915_CACHING_CACHED:
  3477. /*
  3478. * Due to a HW issue on BXT A stepping, GPU stores via a
  3479. * snooped mapping may leave stale data in a corresponding CPU
  3480. * cacheline, whereas normally such cachelines would get
  3481. * invalidated.
  3482. */
  3483. if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
  3484. return -ENODEV;
  3485. level = I915_CACHE_LLC;
  3486. break;
  3487. case I915_CACHING_DISPLAY:
  3488. level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
  3489. break;
  3490. default:
  3491. return -EINVAL;
  3492. }
  3493. obj = i915_gem_object_lookup(file, args->handle);
  3494. if (!obj)
  3495. return -ENOENT;
  3496. /*
3497. The caching mode of a proxy object is handled by its generator, and
  3498. * not allowed to be changed by userspace.
  3499. */
  3500. if (i915_gem_object_is_proxy(obj)) {
  3501. ret = -ENXIO;
  3502. goto out;
  3503. }
  3504. if (obj->cache_level == level)
  3505. goto out;
  3506. ret = i915_gem_object_wait(obj,
  3507. I915_WAIT_INTERRUPTIBLE,
  3508. MAX_SCHEDULE_TIMEOUT,
  3509. to_rps_client(file));
  3510. if (ret)
  3511. goto out;
  3512. ret = i915_mutex_lock_interruptible(dev);
  3513. if (ret)
  3514. goto out;
  3515. ret = i915_gem_object_set_cache_level(obj, level);
  3516. mutex_unlock(&dev->struct_mutex);
  3517. out:
  3518. i915_gem_object_put(obj);
  3519. return ret;
  3520. }
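/*
 * Userspace sketch of the two caching ioctls above, assuming libdrm's
 * drmIoctl() and a valid GEM handle (illustrative only):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,	// request LLC/snooped caching
 *	};
 *
 *	// -ENODEV here means the platform has neither LLC nor snooping
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		return -errno;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	// arg.caching now reports the level actually in effect
 */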
  3521. /*
  3522. * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
  3523. * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
  3524. * (for pageflips). We only flush the caches while preparing the buffer for
  3525. * display, the callers are responsible for frontbuffer flush.
  3526. */
  3527. struct i915_vma *
  3528. i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
  3529. u32 alignment,
  3530. const struct i915_ggtt_view *view,
  3531. unsigned int flags)
  3532. {
  3533. struct i915_vma *vma;
  3534. int ret;
  3535. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3536. /* Mark the global pin early so that we account for the
  3537. * display coherency whilst setting up the cache domains.
  3538. */
  3539. obj->pin_global++;
  3540. /* The display engine is not coherent with the LLC cache on gen6. As
  3541. * a result, we make sure that the pinning that is about to occur is
  3542. * done with uncached PTEs. This is lowest common denominator for all
  3543. * chipsets.
  3544. *
  3545. * However for gen6+, we could do better by using the GFDT bit instead
  3546. * of uncaching, which would allow us to flush all the LLC-cached data
  3547. * with that bit in the PTE to main memory with just one PIPE_CONTROL.
  3548. */
  3549. ret = i915_gem_object_set_cache_level(obj,
  3550. HAS_WT(to_i915(obj->base.dev)) ?
  3551. I915_CACHE_WT : I915_CACHE_NONE);
  3552. if (ret) {
  3553. vma = ERR_PTR(ret);
  3554. goto err_unpin_global;
  3555. }
  3556. /* As the user may map the buffer once pinned in the display plane
  3557. * (e.g. libkms for the bootup splash), we have to ensure that we
  3558. * always use map_and_fenceable for all scanout buffers. However,
  3559. * it may simply be too big to fit into mappable, in which case
  3560. * put it anyway and hope that userspace can cope (but always first
  3561. * try to preserve the existing ABI).
  3562. */
  3563. vma = ERR_PTR(-ENOSPC);
  3564. if ((flags & PIN_MAPPABLE) == 0 &&
  3565. (!view || view->type == I915_GGTT_VIEW_NORMAL))
  3566. vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
  3567. flags |
  3568. PIN_MAPPABLE |
  3569. PIN_NONBLOCK);
  3570. if (IS_ERR(vma))
  3571. vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
  3572. if (IS_ERR(vma))
  3573. goto err_unpin_global;
  3574. vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
  3575. __i915_gem_object_flush_for_display(obj);
  3576. /* It should now be out of any other write domains, and we can update
  3577. * the domain values for our changes.
  3578. */
  3579. obj->read_domains |= I915_GEM_DOMAIN_GTT;
  3580. return vma;
  3581. err_unpin_global:
  3582. obj->pin_global--;
  3583. return vma;
  3584. }
  3585. void
  3586. i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
  3587. {
  3588. lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
  3589. if (WARN_ON(vma->obj->pin_global == 0))
  3590. return;
  3591. if (--vma->obj->pin_global == 0)
  3592. vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
  3593. /* Bump the LRU to try and avoid premature eviction whilst flipping */
  3594. i915_gem_object_bump_inactive_ggtt(vma->obj);
  3595. i915_vma_unpin(vma);
  3596. }
  3597. /**
  3598. * Moves a single object to the CPU read, and possibly write domain.
  3599. * @obj: object to act on
  3600. * @write: requesting write or read-only access
  3601. *
  3602. * This function returns when the move is complete, including waiting on
  3603. * flushes to occur.
  3604. */
  3605. int
  3606. i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  3607. {
  3608. int ret;
  3609. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3610. ret = i915_gem_object_wait(obj,
  3611. I915_WAIT_INTERRUPTIBLE |
  3612. I915_WAIT_LOCKED |
  3613. (write ? I915_WAIT_ALL : 0),
  3614. MAX_SCHEDULE_TIMEOUT,
  3615. NULL);
  3616. if (ret)
  3617. return ret;
  3618. flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  3619. /* Flush the CPU cache if it's still invalid. */
  3620. if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  3621. i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
  3622. obj->read_domains |= I915_GEM_DOMAIN_CPU;
  3623. }
  3624. /* It should now be out of any other write domains, and we can update
  3625. * the domain values for our changes.
  3626. */
  3627. GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
  3628. /* If we're writing through the CPU, then the GPU read domains will
  3629. * need to be invalidated at next use.
  3630. */
  3631. if (write)
  3632. __start_cpu_write(obj);
  3633. return 0;
  3634. }
  3635. /* Throttle our rendering by waiting until the ring has completed our requests
  3636. * emitted over 20 msec ago.
  3637. *
  3638. * Note that if we were to use the current jiffies each time around the loop,
  3639. * we wouldn't escape the function with any frames outstanding if the time to
  3640. * render a frame was over 20ms.
  3641. *
  3642. * This should get us reasonable parallelism between CPU and GPU but also
  3643. * relatively low latency when blocking on a particular request to finish.
  3644. */
  3645. static int
  3646. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  3647. {
  3648. struct drm_i915_private *dev_priv = to_i915(dev);
  3649. struct drm_i915_file_private *file_priv = file->driver_priv;
  3650. unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
  3651. struct i915_request *request, *target = NULL;
  3652. long ret;
  3653. /* ABI: return -EIO if already wedged */
  3654. if (i915_terminally_wedged(&dev_priv->gpu_error))
  3655. return -EIO;
  3656. spin_lock(&file_priv->mm.lock);
  3657. list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
  3658. if (time_after_eq(request->emitted_jiffies, recent_enough))
  3659. break;
  3660. if (target) {
  3661. list_del(&target->client_link);
  3662. target->file_priv = NULL;
  3663. }
  3664. target = request;
  3665. }
  3666. if (target)
  3667. i915_request_get(target);
  3668. spin_unlock(&file_priv->mm.lock);
  3669. if (target == NULL)
  3670. return 0;
  3671. ret = i915_request_wait(target,
  3672. I915_WAIT_INTERRUPTIBLE,
  3673. MAX_SCHEDULE_TIMEOUT);
  3674. i915_request_put(target);
  3675. return ret < 0 ? ret : 0;
  3676. }
  3677. struct i915_vma *
  3678. i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
  3679. const struct i915_ggtt_view *view,
  3680. u64 size,
  3681. u64 alignment,
  3682. u64 flags)
  3683. {
  3684. struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
  3685. struct i915_address_space *vm = &dev_priv->ggtt.base;
  3686. struct i915_vma *vma;
  3687. int ret;
  3688. lockdep_assert_held(&obj->base.dev->struct_mutex);
  3689. if (flags & PIN_MAPPABLE &&
  3690. (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
  3691. /* If the required space is larger than the available
3692. aperture, we will not be able to find a slot for the
  3693. * object and unbinding the object now will be in
  3694. * vain. Worse, doing so may cause us to ping-pong
  3695. * the object in and out of the Global GTT and
  3696. * waste a lot of cycles under the mutex.
  3697. */
  3698. if (obj->base.size > dev_priv->ggtt.mappable_end)
  3699. return ERR_PTR(-E2BIG);
  3700. /* If NONBLOCK is set the caller is optimistically
  3701. * trying to cache the full object within the mappable
  3702. * aperture, and *must* have a fallback in place for
  3703. * situations where we cannot bind the object. We
  3704. * can be a little more lax here and use the fallback
  3705. * more often to avoid costly migrations of ourselves
  3706. * and other objects within the aperture.
  3707. *
  3708. * Half-the-aperture is used as a simple heuristic.
3709. More interesting would be to do a search for a free
  3710. * block prior to making the commitment to unbind.
  3711. * That caters for the self-harm case, and with a
  3712. * little more heuristics (e.g. NOFAULT, NOEVICT)
  3713. * we could try to minimise harm to others.
  3714. */
  3715. if (flags & PIN_NONBLOCK &&
  3716. obj->base.size > dev_priv->ggtt.mappable_end / 2)
  3717. return ERR_PTR(-ENOSPC);
  3718. }
  3719. vma = i915_vma_instance(obj, vm, view);
  3720. if (unlikely(IS_ERR(vma)))
  3721. return vma;
  3722. if (i915_vma_misplaced(vma, size, alignment, flags)) {
  3723. if (flags & PIN_NONBLOCK) {
  3724. if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
  3725. return ERR_PTR(-ENOSPC);
  3726. if (flags & PIN_MAPPABLE &&
  3727. vma->fence_size > dev_priv->ggtt.mappable_end / 2)
  3728. return ERR_PTR(-ENOSPC);
  3729. }
  3730. WARN(i915_vma_is_pinned(vma),
  3731. "bo is already pinned in ggtt with incorrect alignment:"
  3732. " offset=%08x, req.alignment=%llx,"
  3733. " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
  3734. i915_ggtt_offset(vma), alignment,
  3735. !!(flags & PIN_MAPPABLE),
  3736. i915_vma_is_map_and_fenceable(vma));
  3737. ret = i915_vma_unbind(vma);
  3738. if (ret)
  3739. return ERR_PTR(ret);
  3740. }
  3741. ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
  3742. if (ret)
  3743. return ERR_PTR(ret);
  3744. return vma;
  3745. }
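/*
 * Typical caller pattern for the helper above, as a hedged sketch
 * (struct_mutex held, and assuming the usual i915_vma_unpin() helper to
 * drop the pin once the GGTT binding is no longer needed):
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// ... use i915_ggtt_offset(vma) while the pin is held ...
 *	i915_vma_unpin(vma);
 */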
  3746. static __always_inline unsigned int __busy_read_flag(unsigned int id)
  3747. {
  3748. /* Note that we could alias engines in the execbuf API, but
3749. that would be very unwise as it prevents userspace from having
  3750. * fine control over engine selection. Ahem.
  3751. *
  3752. * This should be something like EXEC_MAX_ENGINE instead of
  3753. * I915_NUM_ENGINES.
  3754. */
  3755. BUILD_BUG_ON(I915_NUM_ENGINES > 16);
  3756. return 0x10000 << id;
  3757. }
  3758. static __always_inline unsigned int __busy_write_id(unsigned int id)
  3759. {
  3760. /* The uABI guarantees an active writer is also amongst the read
  3761. * engines. This would be true if we accessed the activity tracking
  3762. * under the lock, but as we perform the lookup of the object and
  3763. * its activity locklessly we can not guarantee that the last_write
  3764. * being active implies that we have set the same engine flag from
  3765. * last_read - hence we always set both read and write busy for
  3766. * last_write.
  3767. */
  3768. return id | __busy_read_flag(id);
  3769. }
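/*
 * Worked example of the encoding above (engine id chosen arbitrarily):
 * for uabi id 3, __busy_read_flag() yields 0x10000 << 3 == 0x80000, and
 * __busy_write_id() yields 3 | 0x80000 == 0x80003, i.e. the reader flags
 * occupy the high 16 bits while the low 16 bits carry the id of the last
 * writer.
 */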
  3770. static __always_inline unsigned int
  3771. __busy_set_if_active(const struct dma_fence *fence,
  3772. unsigned int (*flag)(unsigned int id))
  3773. {
  3774. struct i915_request *rq;
  3775. /* We have to check the current hw status of the fence as the uABI
  3776. * guarantees forward progress. We could rely on the idle worker
  3777. * to eventually flush us, but to minimise latency just ask the
  3778. * hardware.
  3779. *
  3780. * Note we only report on the status of native fences.
  3781. */
  3782. if (!dma_fence_is_i915(fence))
  3783. return 0;
  3784. /* opencode to_request() in order to avoid const warnings */
  3785. rq = container_of(fence, struct i915_request, fence);
  3786. if (i915_request_completed(rq))
  3787. return 0;
  3788. return flag(rq->engine->uabi_id);
  3789. }
  3790. static __always_inline unsigned int
  3791. busy_check_reader(const struct dma_fence *fence)
  3792. {
  3793. return __busy_set_if_active(fence, __busy_read_flag);
  3794. }
  3795. static __always_inline unsigned int
  3796. busy_check_writer(const struct dma_fence *fence)
  3797. {
  3798. if (!fence)
  3799. return 0;
  3800. return __busy_set_if_active(fence, __busy_write_id);
  3801. }
  3802. int
  3803. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3804. struct drm_file *file)
  3805. {
  3806. struct drm_i915_gem_busy *args = data;
  3807. struct drm_i915_gem_object *obj;
  3808. struct reservation_object_list *list;
  3809. unsigned int seq;
  3810. int err;
  3811. err = -ENOENT;
  3812. rcu_read_lock();
  3813. obj = i915_gem_object_lookup_rcu(file, args->handle);
  3814. if (!obj)
  3815. goto out;
  3816. /* A discrepancy here is that we do not report the status of
  3817. * non-i915 fences, i.e. even though we may report the object as idle,
  3818. * a call to set-domain may still stall waiting for foreign rendering.
  3819. * This also means that wait-ioctl may report an object as busy,
  3820. * where busy-ioctl considers it idle.
  3821. *
  3822. * We trade the ability to warn of foreign fences to report on which
  3823. * i915 engines are active for the object.
  3824. *
  3825. * Alternatively, we can trade that extra information on read/write
  3826. * activity with
  3827. * args->busy =
  3828. * !reservation_object_test_signaled_rcu(obj->resv, true);
  3829. * to report the overall busyness. This is what the wait-ioctl does.
  3830. *
  3831. */
  3832. retry:
  3833. seq = raw_read_seqcount(&obj->resv->seq);
  3834. /* Translate the exclusive fence to the READ *and* WRITE engine */
  3835. args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
  3836. /* Translate shared fences to READ set of engines */
  3837. list = rcu_dereference(obj->resv->fence);
  3838. if (list) {
  3839. unsigned int shared_count = list->shared_count, i;
  3840. for (i = 0; i < shared_count; ++i) {
  3841. struct dma_fence *fence =
  3842. rcu_dereference(list->shared[i]);
  3843. args->busy |= busy_check_reader(fence);
  3844. }
  3845. }
  3846. if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
  3847. goto retry;
  3848. err = 0;
  3849. out:
  3850. rcu_read_unlock();
  3851. return err;
  3852. }
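/*
 * Illustrative userspace sketch (added annotation, not part of this file):
 * assuming `fd` is an open DRM file descriptor and `handle` a GEM handle,
 * the encoding produced above could be consumed roughly like this, with the
 * reader bitmask in the high word and the last writer's id in the low word:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy) {
 *		unsigned int readers = busy.busy >> 16;    // engine read mask
 *		unsigned int writer  = busy.busy & 0xffff; // last writer's id
 *		...
 *	}
 */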
  3853. int
  3854. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3855. struct drm_file *file_priv)
  3856. {
  3857. return i915_gem_ring_throttle(dev, file_priv);
  3858. }
  3859. int
  3860. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3861. struct drm_file *file_priv)
  3862. {
  3863. struct drm_i915_private *dev_priv = to_i915(dev);
  3864. struct drm_i915_gem_madvise *args = data;
  3865. struct drm_i915_gem_object *obj;
  3866. int err;
  3867. switch (args->madv) {
  3868. case I915_MADV_DONTNEED:
  3869. case I915_MADV_WILLNEED:
  3870. break;
  3871. default:
  3872. return -EINVAL;
  3873. }
  3874. obj = i915_gem_object_lookup(file_priv, args->handle);
  3875. if (!obj)
  3876. return -ENOENT;
  3877. err = mutex_lock_interruptible(&obj->mm.lock);
  3878. if (err)
  3879. goto out;
  3880. if (i915_gem_object_has_pages(obj) &&
  3881. i915_gem_object_is_tiled(obj) &&
  3882. dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
  3883. if (obj->mm.madv == I915_MADV_WILLNEED) {
  3884. GEM_BUG_ON(!obj->mm.quirked);
  3885. __i915_gem_object_unpin_pages(obj);
  3886. obj->mm.quirked = false;
  3887. }
  3888. if (args->madv == I915_MADV_WILLNEED) {
  3889. GEM_BUG_ON(obj->mm.quirked);
  3890. __i915_gem_object_pin_pages(obj);
  3891. obj->mm.quirked = true;
  3892. }
  3893. }
  3894. if (obj->mm.madv != __I915_MADV_PURGED)
  3895. obj->mm.madv = args->madv;
  3896. /* if the object is no longer attached, discard its backing storage */
  3897. if (obj->mm.madv == I915_MADV_DONTNEED &&
  3898. !i915_gem_object_has_pages(obj))
  3899. i915_gem_object_truncate(obj);
  3900. args->retained = obj->mm.madv != __I915_MADV_PURGED;
  3901. mutex_unlock(&obj->mm.lock);
  3902. out:
  3903. i915_gem_object_put(obj);
  3904. return err;
  3905. }
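/*
 * Illustrative userspace sketch (added annotation, not part of this file):
 * a buffer-cache style user would mark an idle buffer purgeable and later
 * check `retained` when taking it back into use:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,	// shrinker may discard the pages
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	...
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		; // backing store was purged, contents must be reuploaded
 */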
  3906. static void
  3907. frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
  3908. {
  3909. struct drm_i915_gem_object *obj =
  3910. container_of(active, typeof(*obj), frontbuffer_write);
  3911. intel_fb_obj_flush(obj, ORIGIN_CS);
  3912. }
  3913. void i915_gem_object_init(struct drm_i915_gem_object *obj,
  3914. const struct drm_i915_gem_object_ops *ops)
  3915. {
  3916. mutex_init(&obj->mm.lock);
  3917. INIT_LIST_HEAD(&obj->vma_list);
  3918. INIT_LIST_HEAD(&obj->lut_list);
  3919. INIT_LIST_HEAD(&obj->batch_pool_link);
  3920. obj->ops = ops;
  3921. reservation_object_init(&obj->__builtin_resv);
  3922. obj->resv = &obj->__builtin_resv;
  3923. obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
  3924. init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
  3925. obj->mm.madv = I915_MADV_WILLNEED;
  3926. INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
  3927. mutex_init(&obj->mm.get_page.lock);
  3928. i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
  3929. }
  3930. static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
  3931. .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
  3932. I915_GEM_OBJECT_IS_SHRINKABLE,
  3933. .get_pages = i915_gem_object_get_pages_gtt,
  3934. .put_pages = i915_gem_object_put_pages_gtt,
  3935. .pwrite = i915_gem_object_pwrite_gtt,
  3936. };
  3937. static int i915_gem_object_create_shmem(struct drm_device *dev,
  3938. struct drm_gem_object *obj,
  3939. size_t size)
  3940. {
  3941. struct drm_i915_private *i915 = to_i915(dev);
  3942. unsigned long flags = VM_NORESERVE;
  3943. struct file *filp;
  3944. drm_gem_private_object_init(dev, obj, size);
  3945. if (i915->mm.gemfs)
  3946. filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
  3947. flags);
  3948. else
  3949. filp = shmem_file_setup("i915", size, flags);
  3950. if (IS_ERR(filp))
  3951. return PTR_ERR(filp);
  3952. obj->filp = filp;
  3953. return 0;
  3954. }
  3955. struct drm_i915_gem_object *
  3956. i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
  3957. {
  3958. struct drm_i915_gem_object *obj;
  3959. struct address_space *mapping;
  3960. unsigned int cache_level;
  3961. gfp_t mask;
  3962. int ret;
  3963. /* There is a prevalence of the assumption that we fit the object's
  3964. * page count inside a 32bit _signed_ variable. Let's document this and
  3965. * catch if we ever need to fix it. In the meantime, if you do spot
  3966. * such a local variable, please consider fixing!
  3967. */
  3968. if (size >> PAGE_SHIFT > INT_MAX)
  3969. return ERR_PTR(-E2BIG);
  3970. if (overflows_type(size, obj->base.size))
  3971. return ERR_PTR(-E2BIG);
  3972. obj = i915_gem_object_alloc(dev_priv);
  3973. if (obj == NULL)
  3974. return ERR_PTR(-ENOMEM);
  3975. ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
  3976. if (ret)
  3977. goto fail;
  3978. mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
  3979. if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
  3980. /* 965gm cannot relocate objects above 4GiB. */
  3981. mask &= ~__GFP_HIGHMEM;
  3982. mask |= __GFP_DMA32;
  3983. }
  3984. mapping = obj->base.filp->f_mapping;
  3985. mapping_set_gfp_mask(mapping, mask);
  3986. GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
  3987. i915_gem_object_init(obj, &i915_gem_object_ops);
  3988. obj->write_domain = I915_GEM_DOMAIN_CPU;
  3989. obj->read_domains = I915_GEM_DOMAIN_CPU;
  3990. if (HAS_LLC(dev_priv))
  3991. /* On some devices, we can have the GPU use the LLC (the CPU
  3992. * cache) for about a 10% performance improvement
  3993. * compared to uncached. Graphics requests other than
  3994. * display scanout are coherent with the CPU in
  3995. * accessing this cache. This means in this mode we
  3996. * don't need to clflush on the CPU side, and on the
  3997. * GPU side we only need to flush internal caches to
  3998. * get data visible to the CPU.
  3999. *
  4000. * However, we maintain the display planes as UC, and so
  4001. * need to rebind when first used as such.
  4002. */
  4003. cache_level = I915_CACHE_LLC;
  4004. else
  4005. cache_level = I915_CACHE_NONE;
  4006. i915_gem_object_set_cache_coherency(obj, cache_level);
  4007. trace_i915_gem_object_create(obj);
  4008. return obj;
  4009. fail:
  4010. i915_gem_object_free(obj);
  4011. return ERR_PTR(ret);
  4012. }
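/*
 * Illustrative kernel-side sketch (added annotation, not in the original):
 * a typical in-kernel user creates an object, pins and maps its backing
 * pages before use, and drops its reference when done, e.g.
 *
 *	obj = i915_gem_object_create(i915, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr)) {
 *		i915_gem_object_put(obj);
 *		return PTR_ERR(vaddr);
 *	}
 *	... fill vaddr ...
 *	i915_gem_object_unpin_map(obj);
 *	i915_gem_object_put(obj);
 */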
  4013. static bool discard_backing_storage(struct drm_i915_gem_object *obj)
  4014. {
  4015. /* If we are the last user of the backing storage (be it shmemfs
  4016. * pages or stolen etc), we know that the pages are going to be
  4017. * immediately released. In this case, we can then skip copying
  4018. * back the contents from the GPU.
  4019. */
  4020. if (obj->mm.madv != I915_MADV_WILLNEED)
  4021. return false;
  4022. if (obj->base.filp == NULL)
  4023. return true;
  4024. /* At first glance, this looks racy, but then again so would be
  4025. * userspace racing mmap against close. However, the first external
  4026. * reference to the filp can only be obtained through the
  4027. * i915_gem_mmap_ioctl() which safeguards us against the user
  4028. * acquiring such a reference whilst we are in the middle of
  4029. * freeing the object.
  4030. */
  4031. return atomic_long_read(&obj->base.filp->f_count) == 1;
  4032. }
  4033. static void __i915_gem_free_objects(struct drm_i915_private *i915,
  4034. struct llist_node *freed)
  4035. {
  4036. struct drm_i915_gem_object *obj, *on;
  4037. intel_runtime_pm_get(i915);
  4038. llist_for_each_entry_safe(obj, on, freed, freed) {
  4039. struct i915_vma *vma, *vn;
  4040. trace_i915_gem_object_destroy(obj);
  4041. mutex_lock(&i915->drm.struct_mutex);
  4042. GEM_BUG_ON(i915_gem_object_is_active(obj));
  4043. list_for_each_entry_safe(vma, vn,
  4044. &obj->vma_list, obj_link) {
  4045. GEM_BUG_ON(i915_vma_is_active(vma));
  4046. vma->flags &= ~I915_VMA_PIN_MASK;
  4047. i915_vma_destroy(vma);
  4048. }
  4049. GEM_BUG_ON(!list_empty(&obj->vma_list));
  4050. GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
  4051. /* This serializes freeing with the shrinker. Since the free
  4052. * is delayed, first by RCU then by the workqueue, we want the
  4053. * shrinker to be able to free pages of unreferenced objects,
  4054. * or else we may oom whilst there are plenty of deferred
  4055. * freed objects.
  4056. */
  4057. if (i915_gem_object_has_pages(obj)) {
  4058. spin_lock(&i915->mm.obj_lock);
  4059. list_del_init(&obj->mm.link);
  4060. spin_unlock(&i915->mm.obj_lock);
  4061. }
  4062. mutex_unlock(&i915->drm.struct_mutex);
  4063. GEM_BUG_ON(obj->bind_count);
  4064. GEM_BUG_ON(obj->userfault_count);
  4065. GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
  4066. GEM_BUG_ON(!list_empty(&obj->lut_list));
  4067. if (obj->ops->release)
  4068. obj->ops->release(obj);
  4069. if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
  4070. atomic_set(&obj->mm.pages_pin_count, 0);
  4071. __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
  4072. GEM_BUG_ON(i915_gem_object_has_pages(obj));
  4073. if (obj->base.import_attach)
  4074. drm_prime_gem_destroy(&obj->base, NULL);
  4075. reservation_object_fini(&obj->__builtin_resv);
  4076. drm_gem_object_release(&obj->base);
  4077. i915_gem_info_remove_obj(i915, obj->base.size);
  4078. kfree(obj->bit_17);
  4079. i915_gem_object_free(obj);
  4080. GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
  4081. atomic_dec(&i915->mm.free_count);
  4082. if (on)
  4083. cond_resched();
  4084. }
  4085. intel_runtime_pm_put(i915);
  4086. }
  4087. static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
  4088. {
  4089. struct llist_node *freed;
  4090. /* Free the oldest, most stale object to keep the free_list short */
  4091. freed = NULL;
  4092. if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
  4093. /* Only one consumer of llist_del_first() allowed */
  4094. spin_lock(&i915->mm.free_lock);
  4095. freed = llist_del_first(&i915->mm.free_list);
  4096. spin_unlock(&i915->mm.free_lock);
  4097. }
  4098. if (unlikely(freed)) {
  4099. freed->next = NULL;
  4100. __i915_gem_free_objects(i915, freed);
  4101. }
  4102. }
  4103. static void __i915_gem_free_work(struct work_struct *work)
  4104. {
  4105. struct drm_i915_private *i915 =
  4106. container_of(work, struct drm_i915_private, mm.free_work);
  4107. struct llist_node *freed;
  4108. /*
  4109. * All file-owned VMA should have been released by this point through
  4110. * i915_gem_close_object(), or earlier by i915_gem_context_close().
  4111. * However, the object may also be bound into the global GTT (e.g.
  4112. * older GPUs without per-process support, or for direct access through
4113. * the GTT either for the user or for scanout). Those VMA still need to
4114. * be unbound now.
  4115. */
  4116. spin_lock(&i915->mm.free_lock);
  4117. while ((freed = llist_del_all(&i915->mm.free_list))) {
  4118. spin_unlock(&i915->mm.free_lock);
  4119. __i915_gem_free_objects(i915, freed);
  4120. if (need_resched())
  4121. return;
  4122. spin_lock(&i915->mm.free_lock);
  4123. }
  4124. spin_unlock(&i915->mm.free_lock);
  4125. }
  4126. static void __i915_gem_free_object_rcu(struct rcu_head *head)
  4127. {
  4128. struct drm_i915_gem_object *obj =
  4129. container_of(head, typeof(*obj), rcu);
  4130. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  4131. /*
  4132. * Since we require blocking on struct_mutex to unbind the freed
  4133. * object from the GPU before releasing resources back to the
4134. * system, we cannot do that directly from the RCU callback (which may
4135. * be a softirq context), but must instead defer that work onto a
  4136. * kthread. We use the RCU callback rather than move the freed object
  4137. * directly onto the work queue so that we can mix between using the
  4138. * worker and performing frees directly from subsequent allocations for
  4139. * crude but effective memory throttling.
  4140. */
  4141. if (llist_add(&obj->freed, &i915->mm.free_list))
  4142. queue_work(i915->wq, &i915->mm.free_work);
  4143. }
  4144. void i915_gem_free_object(struct drm_gem_object *gem_obj)
  4145. {
  4146. struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  4147. if (obj->mm.quirked)
  4148. __i915_gem_object_unpin_pages(obj);
  4149. if (discard_backing_storage(obj))
  4150. obj->mm.madv = I915_MADV_DONTNEED;
  4151. /*
  4152. * Before we free the object, make sure any pure RCU-only
  4153. * read-side critical sections are complete, e.g.
  4154. * i915_gem_busy_ioctl(). For the corresponding synchronized
  4155. * lookup see i915_gem_object_lookup_rcu().
  4156. */
  4157. atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
  4158. call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
  4159. }
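/*
 * Added annotation (not in the original source): the free path above is a
 * multi-stage pipeline, roughly
 *
 *	i915_gem_free_object()
 *	  -> call_rcu(__i915_gem_free_object_rcu)	after an RCU grace period
 *	     -> llist_add() onto i915->mm.free_list
 *	        -> __i915_gem_free_work() / i915_gem_flush_free_objects()
 *	           -> __i915_gem_free_objects()		takes struct_mutex
 *
 * which is why lockless lookups such as i915_gem_busy_ioctl() can run
 * entirely under rcu_read_lock() without taking a reference.
 */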
  4160. void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
  4161. {
  4162. lockdep_assert_held(&obj->base.dev->struct_mutex);
  4163. if (!i915_gem_object_has_active_reference(obj) &&
  4164. i915_gem_object_is_active(obj))
  4165. i915_gem_object_set_active_reference(obj);
  4166. else
  4167. i915_gem_object_put(obj);
  4168. }
  4169. static void assert_kernel_context_is_current(struct drm_i915_private *i915)
  4170. {
  4171. struct i915_gem_context *kernel_context = i915->kernel_context;
  4172. struct intel_engine_cs *engine;
  4173. enum intel_engine_id id;
  4174. for_each_engine(engine, i915, id) {
  4175. GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
  4176. GEM_BUG_ON(engine->last_retired_context != kernel_context);
  4177. }
  4178. }
  4179. void i915_gem_sanitize(struct drm_i915_private *i915)
  4180. {
  4181. if (i915_terminally_wedged(&i915->gpu_error)) {
  4182. mutex_lock(&i915->drm.struct_mutex);
  4183. i915_gem_unset_wedged(i915);
  4184. mutex_unlock(&i915->drm.struct_mutex);
  4185. }
  4186. /*
  4187. * If we inherit context state from the BIOS or earlier occupants
  4188. * of the GPU, the GPU may be in an inconsistent state when we
  4189. * try to take over. The only way to remove the earlier state
4190. * is by resetting. However, resetting on earlier gen is tricky as
4191. * it may impact the display and we are uncertain about the stability
4192. * of the reset, so we only attempt it on gen5+ where a GPU reset is available.
  4193. */
  4194. if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
  4195. WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
  4196. }
  4197. int i915_gem_suspend(struct drm_i915_private *dev_priv)
  4198. {
  4199. struct drm_device *dev = &dev_priv->drm;
  4200. int ret;
  4201. intel_runtime_pm_get(dev_priv);
  4202. intel_suspend_gt_powersave(dev_priv);
  4203. mutex_lock(&dev->struct_mutex);
  4204. /* We have to flush all the executing contexts to main memory so
4205. * that they can be saved in the hibernation image. To ensure the last
  4206. * context image is coherent, we have to switch away from it. That
  4207. * leaves the dev_priv->kernel_context still active when
  4208. * we actually suspend, and its image in memory may not match the GPU
  4209. * state. Fortunately, the kernel_context is disposable and we do
  4210. * not rely on its state.
  4211. */
  4212. if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
  4213. ret = i915_gem_switch_to_kernel_context(dev_priv);
  4214. if (ret)
  4215. goto err_unlock;
  4216. ret = i915_gem_wait_for_idle(dev_priv,
  4217. I915_WAIT_INTERRUPTIBLE |
  4218. I915_WAIT_LOCKED);
  4219. if (ret && ret != -EIO)
  4220. goto err_unlock;
  4221. assert_kernel_context_is_current(dev_priv);
  4222. }
  4223. i915_gem_contexts_lost(dev_priv);
  4224. mutex_unlock(&dev->struct_mutex);
  4225. intel_uc_suspend(dev_priv);
  4226. cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
  4227. cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4228. /* As the idle_work rearms itself if it detects a race, play safe and
  4229. * repeat the flush until it is definitely idle.
  4230. */
  4231. drain_delayed_work(&dev_priv->gt.idle_work);
4232. /* Assert that we successfully flushed all the work and
  4233. * reset the GPU back to its idle, low power state.
  4234. */
  4235. WARN_ON(dev_priv->gt.awake);
  4236. if (WARN_ON(!intel_engines_are_idle(dev_priv)))
  4237. i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
  4238. /*
  4239. * Neither the BIOS, ourselves or any other kernel
  4240. * expects the system to be in execlists mode on startup,
  4241. * so we need to reset the GPU back to legacy mode. And the only
  4242. * known way to disable logical contexts is through a GPU reset.
  4243. *
  4244. * So in order to leave the system in a known default configuration,
  4245. * always reset the GPU upon unload and suspend. Afterwards we then
  4246. * clean up the GEM state tracking, flushing off the requests and
  4247. * leaving the system in a known idle state.
  4248. *
4249. * Note that it is of the utmost importance that the GPU is idle and
  4250. * all stray writes are flushed *before* we dismantle the backing
  4251. * storage for the pinned objects.
  4252. *
  4253. * However, since we are uncertain that resetting the GPU on older
  4254. * machines is a good idea, we don't - just in case it leaves the
  4255. * machine in an unusable condition.
  4256. */
  4257. intel_uc_sanitize(dev_priv);
  4258. i915_gem_sanitize(dev_priv);
  4259. intel_runtime_pm_put(dev_priv);
  4260. return 0;
  4261. err_unlock:
  4262. mutex_unlock(&dev->struct_mutex);
  4263. intel_runtime_pm_put(dev_priv);
  4264. return ret;
  4265. }
  4266. void i915_gem_resume(struct drm_i915_private *i915)
  4267. {
  4268. WARN_ON(i915->gt.awake);
  4269. mutex_lock(&i915->drm.struct_mutex);
  4270. intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
  4271. i915_gem_restore_gtt_mappings(i915);
  4272. i915_gem_restore_fences(i915);
  4273. /*
  4274. * As we didn't flush the kernel context before suspend, we cannot
  4275. * guarantee that the context image is complete. So let's just reset
  4276. * it and start again.
  4277. */
  4278. i915->gt.resume(i915);
  4279. if (i915_gem_init_hw(i915))
  4280. goto err_wedged;
  4281. intel_uc_resume(i915);
  4282. /* Always reload a context for powersaving. */
  4283. if (i915_gem_switch_to_kernel_context(i915))
  4284. goto err_wedged;
  4285. out_unlock:
  4286. intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
  4287. mutex_unlock(&i915->drm.struct_mutex);
  4288. return;
  4289. err_wedged:
  4290. if (!i915_terminally_wedged(&i915->gpu_error)) {
  4291. DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
  4292. i915_gem_set_wedged(i915);
  4293. }
  4294. goto out_unlock;
  4295. }
  4296. void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
  4297. {
  4298. if (INTEL_GEN(dev_priv) < 5 ||
  4299. dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
  4300. return;
  4301. I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
  4302. DISP_TILE_SURFACE_SWIZZLING);
  4303. if (IS_GEN5(dev_priv))
  4304. return;
  4305. I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
  4306. if (IS_GEN6(dev_priv))
  4307. I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
  4308. else if (IS_GEN7(dev_priv))
  4309. I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
  4310. else if (IS_GEN8(dev_priv))
  4311. I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
  4312. else
  4313. BUG();
  4314. }
  4315. static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
  4316. {
  4317. I915_WRITE(RING_CTL(base), 0);
  4318. I915_WRITE(RING_HEAD(base), 0);
  4319. I915_WRITE(RING_TAIL(base), 0);
  4320. I915_WRITE(RING_START(base), 0);
  4321. }
  4322. static void init_unused_rings(struct drm_i915_private *dev_priv)
  4323. {
  4324. if (IS_I830(dev_priv)) {
  4325. init_unused_ring(dev_priv, PRB1_BASE);
  4326. init_unused_ring(dev_priv, SRB0_BASE);
  4327. init_unused_ring(dev_priv, SRB1_BASE);
  4328. init_unused_ring(dev_priv, SRB2_BASE);
  4329. init_unused_ring(dev_priv, SRB3_BASE);
  4330. } else if (IS_GEN2(dev_priv)) {
  4331. init_unused_ring(dev_priv, SRB0_BASE);
  4332. init_unused_ring(dev_priv, SRB1_BASE);
  4333. } else if (IS_GEN3(dev_priv)) {
  4334. init_unused_ring(dev_priv, PRB1_BASE);
  4335. init_unused_ring(dev_priv, PRB2_BASE);
  4336. }
  4337. }
  4338. static int __i915_gem_restart_engines(void *data)
  4339. {
  4340. struct drm_i915_private *i915 = data;
  4341. struct intel_engine_cs *engine;
  4342. enum intel_engine_id id;
  4343. int err;
  4344. for_each_engine(engine, i915, id) {
  4345. err = engine->init_hw(engine);
  4346. if (err) {
  4347. DRM_ERROR("Failed to restart %s (%d)\n",
  4348. engine->name, err);
  4349. return err;
  4350. }
  4351. }
  4352. return 0;
  4353. }
  4354. int i915_gem_init_hw(struct drm_i915_private *dev_priv)
  4355. {
  4356. int ret;
  4357. dev_priv->gt.last_init_time = ktime_get();
  4358. /* Double layer security blanket, see i915_gem_init() */
  4359. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  4360. if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
  4361. I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  4362. if (IS_HASWELL(dev_priv))
  4363. I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
  4364. LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
  4365. if (HAS_PCH_NOP(dev_priv)) {
  4366. if (IS_IVYBRIDGE(dev_priv)) {
  4367. u32 temp = I915_READ(GEN7_MSG_CTL);
  4368. temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
  4369. I915_WRITE(GEN7_MSG_CTL, temp);
  4370. } else if (INTEL_GEN(dev_priv) >= 7) {
  4371. u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
  4372. temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
  4373. I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
  4374. }
  4375. }
  4376. intel_gt_workarounds_apply(dev_priv);
  4377. i915_gem_init_swizzling(dev_priv);
  4378. /*
  4379. * At least 830 can leave some of the unused rings
  4380. * "active" (ie. head != tail) after resume which
4381. * will prevent c3 entry. Make sure all unused rings
  4382. * are totally idle.
  4383. */
  4384. init_unused_rings(dev_priv);
  4385. BUG_ON(!dev_priv->kernel_context);
  4386. if (i915_terminally_wedged(&dev_priv->gpu_error)) {
  4387. ret = -EIO;
  4388. goto out;
  4389. }
  4390. ret = i915_ppgtt_init_hw(dev_priv);
  4391. if (ret) {
  4392. DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
  4393. goto out;
  4394. }
  4395. ret = intel_wopcm_init_hw(&dev_priv->wopcm);
  4396. if (ret) {
  4397. DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
  4398. goto out;
  4399. }
  4400. /* We can't enable contexts until all firmware is loaded */
  4401. ret = intel_uc_init_hw(dev_priv);
  4402. if (ret) {
  4403. DRM_ERROR("Enabling uc failed (%d)\n", ret);
  4404. goto out;
  4405. }
  4406. intel_mocs_init_l3cc_table(dev_priv);
  4407. /* Only when the HW is re-initialised, can we replay the requests */
  4408. ret = __i915_gem_restart_engines(dev_priv);
  4409. out:
  4410. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4411. return ret;
  4412. }
  4413. static int __intel_engines_record_defaults(struct drm_i915_private *i915)
  4414. {
  4415. struct i915_gem_context *ctx;
  4416. struct intel_engine_cs *engine;
  4417. enum intel_engine_id id;
  4418. int err;
  4419. /*
  4420. * As we reset the gpu during very early sanitisation, the current
4421. * register state on the GPU should reflect its default values.
  4422. * We load a context onto the hw (with restore-inhibit), then switch
  4423. * over to a second context to save that default register state. We
  4424. * can then prime every new context with that state so they all start
  4425. * from the same default HW values.
  4426. */
  4427. ctx = i915_gem_context_create_kernel(i915, 0);
  4428. if (IS_ERR(ctx))
  4429. return PTR_ERR(ctx);
  4430. for_each_engine(engine, i915, id) {
  4431. struct i915_request *rq;
  4432. rq = i915_request_alloc(engine, ctx);
  4433. if (IS_ERR(rq)) {
  4434. err = PTR_ERR(rq);
  4435. goto out_ctx;
  4436. }
  4437. err = 0;
  4438. if (engine->init_context)
  4439. err = engine->init_context(rq);
  4440. __i915_request_add(rq, true);
  4441. if (err)
  4442. goto err_active;
  4443. }
  4444. err = i915_gem_switch_to_kernel_context(i915);
  4445. if (err)
  4446. goto err_active;
  4447. err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
  4448. if (err)
  4449. goto err_active;
  4450. assert_kernel_context_is_current(i915);
  4451. for_each_engine(engine, i915, id) {
  4452. struct i915_vma *state;
  4453. state = to_intel_context(ctx, engine)->state;
  4454. if (!state)
  4455. continue;
  4456. /*
  4457. * As we will hold a reference to the logical state, it will
  4458. * not be torn down with the context, and importantly the
  4459. * object will hold onto its vma (making it possible for a
  4460. * stray GTT write to corrupt our defaults). Unmap the vma
  4461. * from the GTT to prevent such accidents and reclaim the
  4462. * space.
  4463. */
  4464. err = i915_vma_unbind(state);
  4465. if (err)
  4466. goto err_active;
  4467. err = i915_gem_object_set_to_cpu_domain(state->obj, false);
  4468. if (err)
  4469. goto err_active;
  4470. engine->default_state = i915_gem_object_get(state->obj);
  4471. }
  4472. if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
  4473. unsigned int found = intel_engines_has_context_isolation(i915);
  4474. /*
  4475. * Make sure that classes with multiple engine instances all
  4476. * share the same basic configuration.
  4477. */
  4478. for_each_engine(engine, i915, id) {
  4479. unsigned int bit = BIT(engine->uabi_class);
  4480. unsigned int expected = engine->default_state ? bit : 0;
  4481. if ((found & bit) != expected) {
  4482. DRM_ERROR("mismatching default context state for class %d on engine %s\n",
  4483. engine->uabi_class, engine->name);
  4484. }
  4485. }
  4486. }
  4487. out_ctx:
  4488. i915_gem_context_set_closed(ctx);
  4489. i915_gem_context_put(ctx);
  4490. return err;
  4491. err_active:
  4492. /*
  4493. * If we have to abandon now, we expect the engines to be idle
  4494. * and ready to be torn-down. First try to flush any remaining
  4495. * request, ensure we are pointing at the kernel context and
  4496. * then remove it.
  4497. */
  4498. if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
  4499. goto out_ctx;
  4500. if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
  4501. goto out_ctx;
  4502. i915_gem_contexts_lost(i915);
  4503. goto out_ctx;
  4504. }
  4505. int i915_gem_init(struct drm_i915_private *dev_priv)
  4506. {
  4507. int ret;
  4508. /*
4509. * We need to fall back to 4K pages since gvt gtt handling doesn't
4510. * support huge page entries - we will need to check whether the hypervisor
4511. * mm can support huge guest pages, or else do the emulation in gvt.
  4512. */
  4513. if (intel_vgpu_active(dev_priv))
  4514. mkwrite_device_info(dev_priv)->page_sizes =
  4515. I915_GTT_PAGE_SIZE_4K;
  4516. dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
  4517. if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
  4518. dev_priv->gt.resume = intel_lr_context_resume;
  4519. dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
  4520. } else {
  4521. dev_priv->gt.resume = intel_legacy_submission_resume;
  4522. dev_priv->gt.cleanup_engine = intel_engine_cleanup;
  4523. }
  4524. ret = i915_gem_init_userptr(dev_priv);
  4525. if (ret)
  4526. return ret;
  4527. ret = intel_wopcm_init(&dev_priv->wopcm);
  4528. if (ret)
  4529. return ret;
  4530. ret = intel_uc_init_misc(dev_priv);
  4531. if (ret)
  4532. return ret;
  4533. /* This is just a security blanket to placate dragons.
  4534. * On some systems, we very sporadically observe that the first TLBs
  4535. * used by the CS may be stale, despite us poking the TLB reset. If
  4536. * we hold the forcewake during initialisation these problems
  4537. * just magically go away.
  4538. */
  4539. mutex_lock(&dev_priv->drm.struct_mutex);
  4540. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  4541. ret = i915_gem_init_ggtt(dev_priv);
  4542. if (ret) {
  4543. GEM_BUG_ON(ret == -EIO);
  4544. goto err_unlock;
  4545. }
  4546. ret = i915_gem_contexts_init(dev_priv);
  4547. if (ret) {
  4548. GEM_BUG_ON(ret == -EIO);
  4549. goto err_ggtt;
  4550. }
  4551. ret = intel_engines_init(dev_priv);
  4552. if (ret) {
  4553. GEM_BUG_ON(ret == -EIO);
  4554. goto err_context;
  4555. }
  4556. intel_init_gt_powersave(dev_priv);
  4557. ret = intel_uc_init(dev_priv);
  4558. if (ret)
  4559. goto err_pm;
  4560. ret = i915_gem_init_hw(dev_priv);
  4561. if (ret)
  4562. goto err_uc_init;
  4563. /*
  4564. * Despite its name intel_init_clock_gating applies both display
  4565. * clock gating workarounds; GT mmio workarounds and the occasional
  4566. * GT power context workaround. Worse, sometimes it includes a context
  4567. * register workaround which we need to apply before we record the
  4568. * default HW state for all contexts.
  4569. *
  4570. * FIXME: break up the workarounds and apply them at the right time!
  4571. */
  4572. intel_init_clock_gating(dev_priv);
  4573. ret = __intel_engines_record_defaults(dev_priv);
  4574. if (ret)
  4575. goto err_init_hw;
  4576. if (i915_inject_load_failure()) {
  4577. ret = -ENODEV;
  4578. goto err_init_hw;
  4579. }
  4580. if (i915_inject_load_failure()) {
  4581. ret = -EIO;
  4582. goto err_init_hw;
  4583. }
  4584. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4585. mutex_unlock(&dev_priv->drm.struct_mutex);
  4586. return 0;
  4587. /*
  4588. * Unwinding is complicated by that we want to handle -EIO to mean
  4589. * disable GPU submission but keep KMS alive. We want to mark the
4590. * HW as irreversibly wedged, but keep enough state around that the
  4591. * driver doesn't explode during runtime.
  4592. */
  4593. err_init_hw:
  4594. i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
  4595. i915_gem_contexts_lost(dev_priv);
  4596. intel_uc_fini_hw(dev_priv);
  4597. err_uc_init:
  4598. intel_uc_fini(dev_priv);
  4599. err_pm:
  4600. if (ret != -EIO) {
  4601. intel_cleanup_gt_powersave(dev_priv);
  4602. i915_gem_cleanup_engines(dev_priv);
  4603. }
  4604. err_context:
  4605. if (ret != -EIO)
  4606. i915_gem_contexts_fini(dev_priv);
  4607. err_ggtt:
  4608. err_unlock:
  4609. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  4610. mutex_unlock(&dev_priv->drm.struct_mutex);
  4611. intel_uc_fini_misc(dev_priv);
  4612. if (ret != -EIO)
  4613. i915_gem_cleanup_userptr(dev_priv);
  4614. if (ret == -EIO) {
  4615. /*
  4616. * Allow engine initialisation to fail by marking the GPU as
  4617. * wedged. But we only want to do this where the GPU is angry,
  4618. * for all other failure, such as an allocation failure, bail.
  4619. */
  4620. if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
  4621. DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
  4622. i915_gem_set_wedged(dev_priv);
  4623. }
  4624. ret = 0;
  4625. }
  4626. i915_gem_drain_freed_objects(dev_priv);
  4627. return ret;
  4628. }
  4629. void i915_gem_init_mmio(struct drm_i915_private *i915)
  4630. {
  4631. i915_gem_sanitize(i915);
  4632. }
  4633. void
  4634. i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
  4635. {
  4636. struct intel_engine_cs *engine;
  4637. enum intel_engine_id id;
  4638. for_each_engine(engine, dev_priv, id)
  4639. dev_priv->gt.cleanup_engine(engine);
  4640. }
  4641. void
  4642. i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
  4643. {
  4644. int i;
  4645. if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
  4646. !IS_CHERRYVIEW(dev_priv))
  4647. dev_priv->num_fence_regs = 32;
  4648. else if (INTEL_GEN(dev_priv) >= 4 ||
  4649. IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
  4650. IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
  4651. dev_priv->num_fence_regs = 16;
  4652. else
  4653. dev_priv->num_fence_regs = 8;
  4654. if (intel_vgpu_active(dev_priv))
  4655. dev_priv->num_fence_regs =
  4656. I915_READ(vgtif_reg(avail_rs.fence_num));
  4657. /* Initialize fence registers to zero */
  4658. for (i = 0; i < dev_priv->num_fence_regs; i++) {
  4659. struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
  4660. fence->i915 = dev_priv;
  4661. fence->id = i;
  4662. list_add_tail(&fence->link, &dev_priv->mm.fence_list);
  4663. }
  4664. i915_gem_restore_fences(dev_priv);
  4665. i915_gem_detect_bit_6_swizzle(dev_priv);
  4666. }
  4667. static void i915_gem_init__mm(struct drm_i915_private *i915)
  4668. {
  4669. spin_lock_init(&i915->mm.object_stat_lock);
  4670. spin_lock_init(&i915->mm.obj_lock);
  4671. spin_lock_init(&i915->mm.free_lock);
  4672. init_llist_head(&i915->mm.free_list);
  4673. INIT_LIST_HEAD(&i915->mm.unbound_list);
  4674. INIT_LIST_HEAD(&i915->mm.bound_list);
  4675. INIT_LIST_HEAD(&i915->mm.fence_list);
  4676. INIT_LIST_HEAD(&i915->mm.userfault_list);
  4677. INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
  4678. }
  4679. int i915_gem_init_early(struct drm_i915_private *dev_priv)
  4680. {
  4681. int err = -ENOMEM;
  4682. dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
  4683. if (!dev_priv->objects)
  4684. goto err_out;
  4685. dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
  4686. if (!dev_priv->vmas)
  4687. goto err_objects;
  4688. dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
  4689. if (!dev_priv->luts)
  4690. goto err_vmas;
  4691. dev_priv->requests = KMEM_CACHE(i915_request,
  4692. SLAB_HWCACHE_ALIGN |
  4693. SLAB_RECLAIM_ACCOUNT |
  4694. SLAB_TYPESAFE_BY_RCU);
  4695. if (!dev_priv->requests)
  4696. goto err_luts;
  4697. dev_priv->dependencies = KMEM_CACHE(i915_dependency,
  4698. SLAB_HWCACHE_ALIGN |
  4699. SLAB_RECLAIM_ACCOUNT);
  4700. if (!dev_priv->dependencies)
  4701. goto err_requests;
  4702. dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
  4703. if (!dev_priv->priorities)
  4704. goto err_dependencies;
  4705. INIT_LIST_HEAD(&dev_priv->gt.timelines);
  4706. INIT_LIST_HEAD(&dev_priv->gt.active_rings);
  4707. INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
  4708. i915_gem_init__mm(dev_priv);
  4709. INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
  4710. i915_gem_retire_work_handler);
  4711. INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
  4712. i915_gem_idle_work_handler);
  4713. init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
  4714. init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  4715. atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
  4716. spin_lock_init(&dev_priv->fb_tracking.lock);
  4717. err = i915_gemfs_init(dev_priv);
  4718. if (err)
4719. DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
  4720. return 0;
  4721. err_dependencies:
  4722. kmem_cache_destroy(dev_priv->dependencies);
  4723. err_requests:
  4724. kmem_cache_destroy(dev_priv->requests);
  4725. err_luts:
  4726. kmem_cache_destroy(dev_priv->luts);
  4727. err_vmas:
  4728. kmem_cache_destroy(dev_priv->vmas);
  4729. err_objects:
  4730. kmem_cache_destroy(dev_priv->objects);
  4731. err_out:
  4732. return err;
  4733. }
  4734. void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
  4735. {
  4736. i915_gem_drain_freed_objects(dev_priv);
  4737. GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
  4738. GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
  4739. WARN_ON(dev_priv->mm.object_count);
  4740. WARN_ON(!list_empty(&dev_priv->gt.timelines));
  4741. kmem_cache_destroy(dev_priv->priorities);
  4742. kmem_cache_destroy(dev_priv->dependencies);
  4743. kmem_cache_destroy(dev_priv->requests);
  4744. kmem_cache_destroy(dev_priv->luts);
  4745. kmem_cache_destroy(dev_priv->vmas);
  4746. kmem_cache_destroy(dev_priv->objects);
  4747. /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
  4748. rcu_barrier();
  4749. i915_gemfs_fini(dev_priv);
  4750. }
  4751. int i915_gem_freeze(struct drm_i915_private *dev_priv)
  4752. {
  4753. /* Discard all purgeable objects, let userspace recover those as
  4754. * required after resuming.
  4755. */
  4756. i915_gem_shrink_all(dev_priv);
  4757. return 0;
  4758. }
  4759. int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
  4760. {
  4761. struct drm_i915_gem_object *obj;
  4762. struct list_head *phases[] = {
  4763. &dev_priv->mm.unbound_list,
  4764. &dev_priv->mm.bound_list,
  4765. NULL
  4766. }, **p;
  4767. /* Called just before we write the hibernation image.
  4768. *
  4769. * We need to update the domain tracking to reflect that the CPU
  4770. * will be accessing all the pages to create and restore from the
  4771. * hibernation, and so upon restoration those pages will be in the
  4772. * CPU domain.
  4773. *
  4774. * To make sure the hibernation image contains the latest state,
  4775. * we update that state just before writing out the image.
  4776. *
  4777. * To try and reduce the hibernation image, we manually shrink
  4778. * the objects as well, see i915_gem_freeze()
  4779. */
  4780. i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
  4781. i915_gem_drain_freed_objects(dev_priv);
  4782. spin_lock(&dev_priv->mm.obj_lock);
  4783. for (p = phases; *p; p++) {
  4784. list_for_each_entry(obj, *p, mm.link)
  4785. __start_cpu_write(obj);
  4786. }
  4787. spin_unlock(&dev_priv->mm.obj_lock);
  4788. return 0;
  4789. }
  4790. void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  4791. {
  4792. struct drm_i915_file_private *file_priv = file->driver_priv;
  4793. struct i915_request *request;
  4794. /* Clean up our request list when the client is going away, so that
  4795. * later retire_requests won't dereference our soon-to-be-gone
  4796. * file_priv.
  4797. */
  4798. spin_lock(&file_priv->mm.lock);
  4799. list_for_each_entry(request, &file_priv->mm.request_list, client_link)
  4800. request->file_priv = NULL;
  4801. spin_unlock(&file_priv->mm.lock);
  4802. }
  4803. int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
  4804. {
  4805. struct drm_i915_file_private *file_priv;
  4806. int ret;
  4807. DRM_DEBUG("\n");
  4808. file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
  4809. if (!file_priv)
  4810. return -ENOMEM;
  4811. file->driver_priv = file_priv;
  4812. file_priv->dev_priv = i915;
  4813. file_priv->file = file;
  4814. spin_lock_init(&file_priv->mm.lock);
  4815. INIT_LIST_HEAD(&file_priv->mm.request_list);
  4816. file_priv->bsd_engine = -1;
  4817. file_priv->hang_timestamp = jiffies;
  4818. ret = i915_gem_context_open(i915, file);
  4819. if (ret)
  4820. kfree(file_priv);
  4821. return ret;
  4822. }
  4823. /**
  4824. * i915_gem_track_fb - update frontbuffer tracking
  4825. * @old: current GEM buffer for the frontbuffer slots
  4826. * @new: new GEM buffer for the frontbuffer slots
  4827. * @frontbuffer_bits: bitmask of frontbuffer slots
  4828. *
  4829. * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
  4830. * from @old and setting them in @new. Both @old and @new can be NULL.
  4831. */
  4832. void i915_gem_track_fb(struct drm_i915_gem_object *old,
  4833. struct drm_i915_gem_object *new,
  4834. unsigned frontbuffer_bits)
  4835. {
  4836. /* Control of individual bits within the mask are guarded by
  4837. * the owning plane->mutex, i.e. we can never see concurrent
  4838. * manipulation of individual bits. But since the bitfield as a whole
  4839. * is updated using RMW, we need to use atomics in order to update
  4840. * the bits.
  4841. */
  4842. BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
  4843. sizeof(atomic_t) * BITS_PER_BYTE);
  4844. if (old) {
  4845. WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
  4846. atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
  4847. }
  4848. if (new) {
  4849. WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
  4850. atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
  4851. }
  4852. }
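/*
 * Illustrative sketch (added annotation; frontbuffer_bit_for_plane() is a
 * hypothetical helper, not a real function): a display plane update hands
 * the tracking bits over from the old framebuffer object to the new one:
 *
 *	unsigned int bits = frontbuffer_bit_for_plane(plane);
 *
 *	i915_gem_track_fb(old_obj, new_obj, bits);
 */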
  4853. /* Allocate a new GEM object and fill it with the supplied data */
  4854. struct drm_i915_gem_object *
  4855. i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
  4856. const void *data, size_t size)
  4857. {
  4858. struct drm_i915_gem_object *obj;
  4859. struct file *file;
  4860. size_t offset;
  4861. int err;
  4862. obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
  4863. if (IS_ERR(obj))
  4864. return obj;
  4865. GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
  4866. file = obj->base.filp;
  4867. offset = 0;
  4868. do {
  4869. unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
  4870. struct page *page;
  4871. void *pgdata, *vaddr;
  4872. err = pagecache_write_begin(file, file->f_mapping,
  4873. offset, len, 0,
  4874. &page, &pgdata);
  4875. if (err < 0)
  4876. goto fail;
  4877. vaddr = kmap(page);
  4878. memcpy(vaddr, data, len);
  4879. kunmap(page);
  4880. err = pagecache_write_end(file, file->f_mapping,
  4881. offset, len, len,
  4882. page, pgdata);
  4883. if (err < 0)
  4884. goto fail;
  4885. size -= len;
  4886. data += len;
  4887. offset += len;
  4888. } while (size);
  4889. return obj;
  4890. fail:
  4891. i915_gem_object_put(obj);
  4892. return ERR_PTR(err);
  4893. }
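/*
 * Illustrative kernel-side sketch (added annotation, not in the original):
 * this helper suits one-shot uploads such as firmware blobs, e.g. assuming
 * `fw` came from request_firmware():
 *
 *	obj = i915_gem_object_create_from_data(i915, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */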
  4894. struct scatterlist *
  4895. i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
  4896. unsigned int n,
  4897. unsigned int *offset)
  4898. {
  4899. struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
  4900. struct scatterlist *sg;
  4901. unsigned int idx, count;
  4902. might_sleep();
  4903. GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
  4904. GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
  4905. /* As we iterate forward through the sg, we record each entry in a
  4906. * radixtree for quick repeated (backwards) lookups. If we have seen
  4907. * this index previously, we will have an entry for it.
  4908. *
  4909. * Initial lookup is O(N), but this is amortized to O(1) for
  4910. * sequential page access (where each new request is consecutive
  4911. * to the previous one). Repeated lookups are O(lg(obj->base.size)),
  4912. * i.e. O(1) with a large constant!
  4913. */
  4914. if (n < READ_ONCE(iter->sg_idx))
  4915. goto lookup;
  4916. mutex_lock(&iter->lock);
  4917. /* We prefer to reuse the last sg so that repeated lookup of this
  4918. * (or the subsequent) sg are fast - comparing against the last
  4919. * sg is faster than going through the radixtree.
  4920. */
  4921. sg = iter->sg_pos;
  4922. idx = iter->sg_idx;
  4923. count = __sg_page_count(sg);
  4924. while (idx + count <= n) {
  4925. unsigned long exception, i;
  4926. int ret;
  4927. /* If we cannot allocate and insert this entry, or the
  4928. * individual pages from this range, cancel updating the
  4929. * sg_idx so that on this lookup we are forced to linearly
  4930. * scan onwards, but on future lookups we will try the
  4931. * insertion again (in which case we need to be careful of
  4932. * the error return reporting that we have already inserted
  4933. * this index).
  4934. */
  4935. ret = radix_tree_insert(&iter->radix, idx, sg);
  4936. if (ret && ret != -EEXIST)
  4937. goto scan;
  4938. exception =
  4939. RADIX_TREE_EXCEPTIONAL_ENTRY |
  4940. idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
  4941. for (i = 1; i < count; i++) {
  4942. ret = radix_tree_insert(&iter->radix, idx + i,
  4943. (void *)exception);
  4944. if (ret && ret != -EEXIST)
  4945. goto scan;
  4946. }
  4947. idx += count;
  4948. sg = ____sg_next(sg);
  4949. count = __sg_page_count(sg);
  4950. }
  4951. scan:
  4952. iter->sg_pos = sg;
  4953. iter->sg_idx = idx;
  4954. mutex_unlock(&iter->lock);
  4955. if (unlikely(n < idx)) /* insertion completed by another thread */
  4956. goto lookup;
  4957. /* In case we failed to insert the entry into the radixtree, we need
  4958. * to look beyond the current sg.
  4959. */
  4960. while (idx + count <= n) {
  4961. idx += count;
  4962. sg = ____sg_next(sg);
  4963. count = __sg_page_count(sg);
  4964. }
  4965. *offset = n - idx;
  4966. return sg;
  4967. lookup:
  4968. rcu_read_lock();
  4969. sg = radix_tree_lookup(&iter->radix, n);
  4970. GEM_BUG_ON(!sg);
4971. * If this index is in the middle of a multi-page sg entry,
  4972. * the radixtree will contain an exceptional entry that points
  4973. * to the start of that range. We will return the pointer to
  4974. * the base page and the offset of this page within the
  4975. * sg entry's range.
  4976. */
  4977. *offset = 0;
  4978. if (unlikely(radix_tree_exception(sg))) {
  4979. unsigned long base =
  4980. (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
  4981. sg = radix_tree_lookup(&iter->radix, base);
  4982. GEM_BUG_ON(!sg);
  4983. *offset = n - base;
  4984. }
  4985. rcu_read_unlock();
  4986. return sg;
  4987. }
  4988. struct page *
  4989. i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
  4990. {
  4991. struct scatterlist *sg;
  4992. unsigned int offset;
  4993. GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
  4994. sg = i915_gem_object_get_sg(obj, n, &offset);
  4995. return nth_page(sg_page(sg), offset);
  4996. }
  4997. /* Like i915_gem_object_get_page(), but mark the returned page dirty */
  4998. struct page *
  4999. i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
  5000. unsigned int n)
  5001. {
  5002. struct page *page;
  5003. page = i915_gem_object_get_page(obj, n);
  5004. if (!obj->mm.dirty)
  5005. set_page_dirty(page);
  5006. return page;
  5007. }
  5008. dma_addr_t
  5009. i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
  5010. unsigned long n)
  5011. {
  5012. struct scatterlist *sg;
  5013. unsigned int offset;
  5014. sg = i915_gem_object_get_sg(obj, n, &offset);
  5015. return sg_dma_address(sg) + (offset << PAGE_SHIFT);
  5016. }
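/*
 * Illustrative kernel-side sketch (added annotation, not in the original):
 * the lookups above require the backing pages to be pinned, so a caller
 * walking an object page by page would do something like
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	for (n = 0; n < obj->base.size >> PAGE_SHIFT; n++) {
 *		struct page *page = i915_gem_object_get_page(obj, n);
 *		... access the page ...
 *	}
 *
 *	i915_gem_object_unpin_pages(obj);
 */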
  5017. int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
  5018. {
  5019. struct sg_table *pages;
  5020. int err;
  5021. if (align > obj->base.size)
  5022. return -EINVAL;
  5023. if (obj->ops == &i915_gem_phys_ops)
  5024. return 0;
  5025. if (obj->ops != &i915_gem_object_ops)
  5026. return -EINVAL;
  5027. err = i915_gem_object_unbind(obj);
  5028. if (err)
  5029. return err;
  5030. mutex_lock(&obj->mm.lock);
  5031. if (obj->mm.madv != I915_MADV_WILLNEED) {
  5032. err = -EFAULT;
  5033. goto err_unlock;
  5034. }
  5035. if (obj->mm.quirked) {
  5036. err = -EFAULT;
  5037. goto err_unlock;
  5038. }
  5039. if (obj->mm.mapping) {
  5040. err = -EBUSY;
  5041. goto err_unlock;
  5042. }
  5043. pages = fetch_and_zero(&obj->mm.pages);
  5044. if (pages) {
  5045. struct drm_i915_private *i915 = to_i915(obj->base.dev);
  5046. __i915_gem_object_reset_page_iter(obj);
  5047. spin_lock(&i915->mm.obj_lock);
  5048. list_del(&obj->mm.link);
  5049. spin_unlock(&i915->mm.obj_lock);
  5050. }
  5051. obj->ops = &i915_gem_phys_ops;
  5052. err = ____i915_gem_object_get_pages(obj);
  5053. if (err)
  5054. goto err_xfer;
  5055. /* Perma-pin (until release) the physical set of pages */
  5056. __i915_gem_object_pin_pages(obj);
  5057. if (!IS_ERR_OR_NULL(pages))
  5058. i915_gem_object_ops.put_pages(obj, pages);
  5059. mutex_unlock(&obj->mm.lock);
  5060. return 0;
  5061. err_xfer:
  5062. obj->ops = &i915_gem_object_ops;
  5063. obj->mm.pages = pages;
  5064. err_unlock:
  5065. mutex_unlock(&obj->mm.lock);
  5066. return err;
  5067. }
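/*
 * Illustrative kernel-side sketch (added annotation, not in the original):
 * attach_phys converts a shmem-backed object into a physically contiguous
 * one, as used for cursor planes on old hardware that needs physical
 * cursors:
 *
 *	ret = i915_gem_object_attach_phys(obj, align);
 *	if (ret)
 *		return ret;
 */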
  5068. #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
  5069. #include "selftests/scatterlist.c"
  5070. #include "selftests/mock_gem_device.c"
  5071. #include "selftests/huge_gem_object.c"
  5072. #include "selftests/huge_pages.c"
  5073. #include "selftests/i915_gem_object.c"
  5074. #include "selftests/i915_gem_coherency.c"
  5075. #endif